repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
bastianbeischer/astropy |
[
"6054cc78b22a6bcd4b37fdfdec02f5331b957355",
"6054cc78b22a6bcd4b37fdfdec02f5331b957355",
"6054cc78b22a6bcd4b37fdfdec02f5331b957355",
"6054cc78b22a6bcd4b37fdfdec02f5331b957355"
] |
[
"astropy/io/votable/tests/converter_test.py",
"astropy/modeling/tests/test_quantities_fitting.py",
"astropy/table/table.py",
"astropy/io/misc/tests/test_hdf5.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\n\n# THIRD-PARTY\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\n# LOCAL\nfrom astropy.io.votable import converters\nfrom astropy.io.votable import exceptions\nfrom astropy.io.votable import tree\n\nfrom astropy.io.votable.table import parse_single_table\nfrom astropy.tests.helper import raises, catch_warnings\nfrom astropy.utils.data import get_pkg_data_filename\n\n\n@raises(exceptions.E13)\ndef test_invalid_arraysize():\n field = tree.Field(\n None, name='broken', datatype='char', arraysize='foo')\n converters.get_converter(field)\n\n\ndef test_oversize_char():\n config = {'verify': 'exception'}\n with catch_warnings(exceptions.W47) as w:\n field = tree.Field(\n None, name='c', datatype='char',\n config=config)\n c = converters.get_converter(field, config=config)\n assert len(w) == 1\n\n with catch_warnings(exceptions.W46) as w:\n c.parse(\"XXX\")\n assert len(w) == 1\n\n\ndef test_char_mask():\n config = {'verify': 'exception'}\n field = tree.Field(None, name='c', arraysize='1', datatype='char',\n config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(\"Foo\", True) == ''\n\n\ndef test_oversize_unicode():\n config = {'verify': 'exception'}\n with catch_warnings(exceptions.W46) as w:\n field = tree.Field(\n None, name='c2', datatype='unicodeChar',\n config=config)\n c = converters.get_converter(field, config=config)\n\n c.parse(\"XXX\")\n assert len(w) == 1\n\n\ndef test_unicode_mask():\n config = {'verify': 'exception'}\n field = tree.Field(None, name='c', arraysize='1', datatype='unicodeChar',\n config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(\"Foo\", True) == ''\n\n\ndef test_unicode_as_char():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='unicode_in_char', datatype='char',\n arraysize='*', config=config)\n c = converters.get_converter(field, config=config)\n\n # Test parsing.\n c.parse('XYZ') # ASCII succeeds\n with pytest.warns(\n exceptions.W55,\n match=r'FIELD \\(unicode_in_char\\) has datatype=\"char\" but contains non-ASCII value'):\n c.parse(\"zła\") # non-ASCII\n\n # Test output.\n c.output('XYZ', False) # ASCII str succeeds\n c.output(b'XYZ', False) # ASCII bytes succeeds\n value = 'zła'\n value_bytes = value.encode('utf-8')\n with pytest.warns(\n exceptions.E24,\n match=r'E24: Attempt to write non-ASCII value'):\n c.output(value, False) # non-ASCII str raises\n with pytest.warns(\n exceptions.E24,\n match=r'E24: Attempt to write non-ASCII value'):\n c.output(value_bytes, False) # non-ASCII bytes raises\n\n\ndef test_unicode_as_char_binary():\n config = {'verify': 'exception'}\n\n field = tree.Field(\n None, name='unicode_in_char', datatype='char',\n arraysize='*', config=config)\n c = converters.get_converter(field, config=config)\n c._binoutput_var('abc', False) # ASCII succeeds\n with pytest.raises(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c._binoutput_var('zła', False)\n\n field = tree.Field(\n None, name='unicode_in_char', datatype='char',\n arraysize='3', config=config)\n c = converters.get_converter(field, config=config)\n c._binoutput_fixed('xyz', False)\n with pytest.raises(exceptions.E24, match=r\"E24: Attempt to write non-ASCII value\"):\n c._binoutput_fixed('zła', False)\n\n\n@raises(exceptions.E02)\ndef test_wrong_number_of_elements():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='int', 
arraysize='2x3*',\n config=config)\n c = converters.get_converter(field, config=config)\n c.parse(\"2 3 4 5 6\")\n\n\n@raises(ValueError)\ndef test_float_mask():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='float',\n config=config)\n c = converters.get_converter(field, config=config)\n assert c.parse('') == (c.null, True)\n c.parse('null')\n\n\ndef test_float_mask_permissive():\n config = {'verify': 'ignore'}\n field = tree.Field(\n None, name='c', datatype='float',\n config=config)\n\n # config needs to be also passed into parse() to work.\n # https://github.com/astropy/astropy/issues/8775\n c = converters.get_converter(field, config=config)\n assert c.parse('null', config=config) == (c.null, True)\n\n\n@raises(exceptions.E02)\ndef test_complex_array_vararray():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='floatComplex', arraysize='2x3*',\n config=config)\n c = converters.get_converter(field, config=config)\n c.parse(\"2 3 4 5 6\")\n\n\ndef test_complex_array_vararray2():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='floatComplex', arraysize='2x3*',\n config=config)\n c = converters.get_converter(field, config=config)\n x = c.parse(\"\")\n assert len(x[0]) == 0\n\n\ndef test_complex_array_vararray3():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='doubleComplex', arraysize='2x3*',\n config=config)\n c = converters.get_converter(field, config=config)\n x = c.parse(\"1 2 3 4 5 6 7 8 9 10 11 12\")\n assert len(x) == 2\n assert np.all(x[0][0][0] == complex(1, 2))\n\n\ndef test_complex_vararray():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='doubleComplex', arraysize='*',\n config=config)\n c = converters.get_converter(field, config=config)\n x = c.parse(\"1 2 3 4\")\n assert len(x) == 2\n assert x[0][0] == complex(1, 2)\n\n\n@raises(exceptions.E03)\ndef test_complex():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='doubleComplex',\n config=config)\n c = converters.get_converter(field, config=config)\n c.parse(\"1 2 3\")\n\n\n@raises(exceptions.E04)\ndef test_bit():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='bit',\n config=config)\n c = converters.get_converter(field, config=config)\n c.parse(\"T\")\n\n\ndef test_bit_mask():\n config = {'verify': 'exception'}\n with catch_warnings(exceptions.W39) as w:\n field = tree.Field(\n None, name='c', datatype='bit',\n config=config)\n c = converters.get_converter(field, config=config)\n c.output(True, True)\n assert len(w) == 1\n\n\n@raises(exceptions.E05)\ndef test_boolean():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='boolean',\n config=config)\n c = converters.get_converter(field, config=config)\n c.parse('YES')\n\n\ndef test_boolean_array():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='boolean', arraysize='*',\n config=config)\n c = converters.get_converter(field, config=config)\n r, mask = c.parse('TRUE FALSE T F 0 1')\n assert_array_equal(r, [True, False, True, False, False, True])\n\n\n@raises(exceptions.E06)\ndef test_invalid_type():\n config = {'verify': 'exception'}\n field = tree.Field(\n None, name='c', datatype='foobar',\n config=config)\n converters.get_converter(field, config=config)\n\n\ndef test_precision():\n config = {'verify': 'exception'}\n\n field = tree.Field(\n None, name='c', 
datatype='float', precision=\"E4\",\n config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(266.248, False) == '266.2'\n\n field = tree.Field(\n None, name='c', datatype='float', precision=\"F4\",\n config=config)\n c = converters.get_converter(field, config=config)\n assert c.output(266.248, False) == '266.2480'\n\n\n@raises(exceptions.W51)\ndef test_integer_overflow():\n config = {'verify': 'exception'}\n\n field = tree.Field(\n None, name='c', datatype='int', config=config)\n c = converters.get_converter(field, config=config)\n c.parse('-2208988800', config=config)\n\n\ndef test_float_default_precision():\n config = {'verify': 'exception'}\n\n field = tree.Field(\n None, name='c', datatype='float', arraysize=\"4\",\n config=config)\n c = converters.get_converter(field, config=config)\n assert (c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) ==\n '1 2 3 8.9990234375')\n\n\ndef test_vararray():\n votable = tree.VOTableFile()\n resource = tree.Resource()\n votable.resources.append(resource)\n table = tree.Table(votable)\n resource.tables.append(table)\n\n tabarr = []\n heads = ['headA', 'headB', 'headC']\n types = [\"char\", \"double\", \"int\"]\n\n vals = [[\"A\", 1.0, 2],\n [\"B\", 2.0, 3],\n [\"C\", 3.0, 4]]\n for i in range(len(heads)):\n tabarr.append(tree.Field(\n votable, name=heads[i], datatype=types[i], arraysize=\"*\"))\n\n table.fields.extend(tabarr)\n table.create_arrays(len(vals))\n for i in range(len(vals)):\n values = tuple(vals[i])\n table.array[i] = values\n buff = io.BytesIO()\n votable.to_xml(buff)\n\n\ndef test_gemini_v1_2():\n '''\n see Pull Request 4782 or Issue 4781 for details\n '''\n table = parse_single_table(get_pkg_data_filename('data/gemini.xml'))\n assert table is not None\n\n tt = table.to_table()\n assert tt['access_url'][0] == (\n 'http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/'\n 'S20120515S0064?runid=bx9b1o8cvk1qesrt')\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nTests that relate to fitting models with quantity parameters\n\"\"\"\n# pylint: disable=invalid-name, no-member\nimport numpy as np\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.units import UnitsError\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.utils import NumpyRNGContext\nfrom astropy.modeling import fitting\nfrom astropy.modeling import models\nfrom astropy.modeling.core import Fittable1DModel\nfrom astropy.modeling.parameters import Parameter\n\ntry:\n from scipy import optimize # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\n# Fitting should be as intuitive as possible to the user. Essentially, models\n# and fitting should work without units, but if one has units, the other should\n# have units too, and the resulting fitted parameters will also have units.\n\n\ndef _fake_gaussian_data():\n\n # Generate fake data\n with NumpyRNGContext(12345):\n x = np.linspace(-5., 5., 2000)\n y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)\n y += np.random.normal(0., 0.2, x.shape)\n\n # Attach units to data\n x = x * u.m\n y = y * u.Jy\n\n return x, y\n\n\nbad_compound_models_no_units = [\n models.Linear1D() + models.Gaussian1D() | models.Scale(),\n models.Linear1D() + models.Gaussian1D() | models.Shift()\n]\n\ncompound_models_no_units = [\n models.Linear1D() + models.Gaussian1D() + models.Gaussian1D()\n]\n\n\nclass CustomInputNamesModel(Fittable1DModel):\n\n n_inputs = 1\n n_outputs = 1\n\n a = Parameter(default=1.0)\n b = Parameter(default=1.0)\n\n def __init__(self, a=a, b=b):\n super().__init__(a=a, b=b)\n self.inputs = ('inn',)\n self.outputs = ('out',)\n\n @staticmethod\n def evaluate(inn, a, b):\n return a * inn + b\n\n @property\n def input_units(self):\n if self.a.unit is None and self.b.unit is None:\n return None\n else:\n return {'inn': self.b.unit / self.a.unit}\n\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'a': outputs_unit['out'] / inputs_unit['inn'],\n 'b': outputs_unit['out']\n }\n\n\ndef models_with_custom_names():\n line = models.Linear1D(1 * u.m / u.s, 2 * u.m)\n line.inputs = ('inn',)\n line.outputs = ('out',)\n\n custom_names_model = CustomInputNamesModel(1 * u.m / u.s, 2 * u.m)\n return [line, custom_names_model]\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitting_simple():\n\n x, y = _fake_gaussian_data()\n\n # Fit the data using a Gaussian with units\n g_init = models.Gaussian1D()\n fit_g = fitting.LevMarLSQFitter()\n g = fit_g(g_init, x, y)\n\n # TODO: update actual numerical results once implemented, but these should\n # be close to the values below.\n assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)\n assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)\n assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitting_with_initial_values():\n\n x, y = _fake_gaussian_data()\n\n # Fit the data using a Gaussian with units\n g_init = models.Gaussian1D(amplitude=1. 
* u.mJy,\n mean=3 * u.cm,\n stddev=2 * u.mm)\n fit_g = fitting.LevMarLSQFitter()\n g = fit_g(g_init, x, y)\n\n # TODO: update actual numerical results once implemented, but these should\n # be close to the values below.\n assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)\n assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)\n assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitting_missing_data_units():\n \"\"\"\n Raise an error if the model has units but the data doesn't\n \"\"\"\n class UnorderedGaussian1D(models.Gaussian1D):\n # Parameters are ordered differently here from Gaussian1D\n # to ensure the order does not break functionality.\n def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):\n return {'amplitude': outputs_unit['y'],\n 'mean': inputs_unit['x'],\n 'stddev': inputs_unit['x']}\n\n g_init = UnorderedGaussian1D(amplitude=1. * u.mJy,\n mean=3 * u.cm,\n stddev=2 * u.mm)\n fit_g = fitting.LevMarLSQFitter()\n\n # We define flux unit so that conversion fails at wavelength unit.\n # This is because the order of parameter unit conversion seems to\n # follow the order defined in _parameter_units_for_data_units method.\n with pytest.raises(UnitsError) as exc:\n fit_g(g_init, [1, 2, 3],\n [4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz)))\n assert exc.value.args[0] == (\"'cm' (length) and '' (dimensionless) are \"\n \"not convertible\")\n\n with pytest.raises(UnitsError) as exc:\n fit_g(g_init, [1, 2, 3] * u.m, [4, 5, 6])\n assert exc.value.args[0] == (\"'mJy' (spectral flux density) and '' \"\n \"(dimensionless) are not convertible\")\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitting_missing_model_units():\n \"\"\"\n Proceed if the data has units but the model doesn't\n \"\"\"\n\n x, y = _fake_gaussian_data()\n\n g_init = models.Gaussian1D(amplitude=1., mean=3, stddev=2)\n fit_g = fitting.LevMarLSQFitter()\n g = fit_g(g_init, x, y)\n\n assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)\n assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)\n assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)\n\n g_init = models.Gaussian1D(amplitude=1., mean=3 * u.m, stddev=2 * u.m)\n fit_g = fitting.LevMarLSQFitter()\n g = fit_g(g_init, x, y)\n\n assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)\n assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)\n assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitting_incompatible_units():\n \"\"\"\n Raise an error if the data and model have incompatible units\n \"\"\"\n\n g_init = models.Gaussian1D(amplitude=1. 
* u.Jy,\n mean=3 * u.m,\n stddev=2 * u.cm)\n fit_g = fitting.LevMarLSQFitter()\n\n with pytest.raises(UnitsError) as exc:\n fit_g(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)\n assert exc.value.args[0] == (\"'Hz' (frequency) and 'm' (length) are not convertible\")\n\n\[email protected]('not HAS_SCIPY')\[email protected](r'ignore:The fit may be unsuccessful.*')\[email protected]('model', compound_models_no_units)\ndef test_compound_without_units(model):\n x = np.linspace(-5, 5, 10) * u.Angstrom\n with NumpyRNGContext(12345):\n y = np.random.sample(10)\n\n fitter = fitting.LevMarLSQFitter()\n res_fit = fitter(model, x, y * u.Hz)\n for param_name in res_fit.param_names:\n print(getattr(res_fit, param_name))\n assert all([res_fit[i]._has_units for i in range(3)])\n z = res_fit(x)\n assert isinstance(z, u.Quantity)\n\n res_fit = fitter(model, np.arange(10) * u.Unit('Angstrom'), y)\n assert all([res_fit[i]._has_units for i in range(3)])\n z = res_fit(x)\n assert isinstance(z, np.ndarray)\n\n\[email protected]('not HAS_SCIPY')\ndef test_compound_fitting_with_units():\n x = np.linspace(-5, 5, 15) * u.Angstrom\n y = np.linspace(-5, 5, 15) * u.Angstrom\n\n fitter = fitting.LevMarLSQFitter()\n m = models.Gaussian2D(10*u.Hz,\n 3*u.Angstrom, 4*u.Angstrom,\n 1*u.Angstrom, 2*u.Angstrom)\n p = models.Planar2D(3*u.Hz/u.Angstrom, 4*u.Hz/u.Angstrom, 1*u.Hz)\n model = m + p\n\n z = model(x, y)\n res = fitter(model, x, y, z)\n assert isinstance(res(x, y), np.ndarray)\n assert all([res[i]._has_units for i in range(2)])\n\n model = models.Gaussian2D() + models.Planar2D()\n res = fitter(model, x, y, z)\n assert isinstance(res(x, y), np.ndarray)\n assert all([res[i]._has_units for i in range(2)])\n\n\[email protected]('not HAS_SCIPY')\[email protected]('model', bad_compound_models_no_units)\ndef test_bad_compound_without_units(model):\n with pytest.raises(ValueError):\n x = np.linspace(-5, 5, 10) * u.Angstrom\n with NumpyRNGContext(12345):\n y = np.random.sample(10)\n\n fitter = fitting.LevMarLSQFitter()\n res_fit = fitter(model, x, y * u.Hz)\n\n\[email protected]('not HAS_SCIPY')\[email protected](r'ignore:Model is linear in parameters*')\[email protected]('model', models_with_custom_names())\ndef test_fitting_custom_names(model):\n \"\"\" Tests fitting of models with custom inputs and outsputs names.\"\"\"\n\n x = np.linspace(0, 10, 100) * u.s\n y = model(x)\n fitter = fitting.LevMarLSQFitter()\n new_model = fitter(model, x, y)\n for param_name in model.param_names:\n assert_quantity_allclose(getattr(new_model, param_name).quantity,\n getattr(model, param_name).quantity)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom .index import TableIndices, TableLoc, TableILoc, TableLocIndices\n\nimport sys\nfrom collections import OrderedDict, defaultdict\nfrom collections.abc import Mapping\nimport warnings\nfrom copy import deepcopy\nimport types\nimport itertools\n\nimport numpy as np\nfrom numpy import ma\n\nfrom astropy import log\nfrom astropy.units import Quantity, QuantityInfo\nfrom astropy.utils import isiterable, ShapedLikeNDArray\nfrom astropy.utils.console import color_print\nfrom astropy.utils.metadata import MetaData, MetaAttribute\nfrom astropy.utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo\nfrom astropy.utils.decorators import format_doc\nfrom astropy.io.registry import UnifiedReadWriteMethod\n\nfrom . import groups\nfrom .pprint import TableFormatter\nfrom .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,\n col_copy)\nfrom .row import Row\nfrom .np_utils import fix_column_name\nfrom .info import TableInfo\nfrom .index import Index, _IndexModeContext, get_index\nfrom .connect import TableRead, TableWrite\nfrom . import conf\n\n\n_implementation_notes = \"\"\"\nThis string has informal notes concerning Table implementation for developers.\n\nThings to remember:\n\n- Table has customizable attributes ColumnClass, Column, MaskedColumn.\n Table.Column is normally just column.Column (same w/ MaskedColumn)\n but in theory they can be different. Table.ColumnClass is the default\n class used to create new non-mixin columns, and this is a function of\n the Table.masked attribute. Column creation / manipulation in a Table\n needs to respect these.\n\n- Column objects that get inserted into the Table.columns attribute must\n have the info.parent_table attribute set correctly. Beware just dropping\n an object into the columns dict since an existing column may\n be part of another Table and have parent_table set to point at that\n table. Dropping that column into `columns` of this Table will cause\n a problem for the old one so the column object needs to be copied (but\n not necessarily the data).\n\n Currently replace_column is always making a copy of both object and\n data if parent_table is set. This could be improved but requires a\n generic way to copy a mixin object but not the data.\n\n- Be aware of column objects that have indices set.\n\n- `cls.ColumnClass` is a property that effectively uses the `masked` attribute\n to choose either `cls.Column` or `cls.MaskedColumn`.\n\"\"\"\n\n__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',\n 'Table.convert_bytestring_to_unicode',\n 'Table.convert_unicode_to_bytestring',\n ]\n\n__doctest_requires__ = {'*pandas': ['pandas']}\n\n_pprint_docs = \"\"\"\n {__doc__}\n\n Parameters\n ----------\n max_lines : int or `None`\n Maximum number of lines in table output.\n\n max_width : int or `None`\n Maximum character width of output.\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. Default is True.\n\n align : str or list or tuple or `None`\n Left/right alignment of columns. Default is right (None) for all\n columns. Other allowed values are '>', '<', '^', and '0=' for\n right, left, centered, and 0-padded, respectively. 
A list of\n strings can be provided for alignment of tables with multiple\n columns.\n \"\"\"\n\n_pformat_docs = \"\"\"\n {__doc__}\n\n Parameters\n ----------\n max_lines : int or `None`\n Maximum number of rows to output\n\n max_width : int or `None`\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. Default is True.\n\n html : bool\n Format the output as an HTML table. Default is False.\n\n tableid : str or `None`\n An ID tag for the table; only used if html is set. Default is\n \"table{id}\", where id is the unique integer id of the table object,\n id(self)\n\n align : str or list or tuple or `None`\n Left/right alignment of columns. Default is right (None) for all\n columns. Other allowed values are '>', '<', '^', and '0=' for\n right, left, centered, and 0-padded, respectively. A list of\n strings can be provided for alignment of tables with multiple\n columns.\n\n tableclass : str or list of str or `None`\n CSS classes for the table; only used if html is set. Default is\n None.\n\n Returns\n -------\n lines : list\n Formatted table as a list of strings.\n \"\"\"\n\n\nclass TableReplaceWarning(UserWarning):\n \"\"\"\n Warning class for cases when a table column is replaced via the\n Table.__setitem__ syntax e.g. t['a'] = val.\n\n This does not inherit from AstropyWarning because we want to use\n stacklevel=3 to show the user where the issue occurred in their code.\n \"\"\"\n pass\n\n\ndef descr(col):\n \"\"\"Array-interface compliant full description of a column.\n\n This returns a 3-tuple (name, type, shape) that can always be\n used in a structured array dtype definition.\n \"\"\"\n col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype\n col_shape = col.shape[1:] if hasattr(col, 'shape') else ()\n return (col.info.name, col_dtype, col_shape)\n\n\ndef has_info_class(obj, cls):\n return hasattr(obj, 'info') and isinstance(obj.info, cls)\n\n\ndef _get_names_from_list_of_dict(rows):\n \"\"\"Return list of column names if ``rows`` is a list of dict that\n defines table data.\n\n If rows is not a list of dict then return None.\n \"\"\"\n if rows is None:\n return None\n\n names = set()\n for row in rows:\n if not isinstance(row, dict):\n return None\n names.update(row)\n return list(names)\n\n\n# Note to future maintainers: when transitioning this to dict\n# be sure to change the OrderedDict ref(s) in Row and in __len__().\n\nclass TableColumns(OrderedDict):\n \"\"\"OrderedDict subclass for a set of columns.\n\n This class enhances item access to provide convenient access to columns\n by name or index, including slice access. It also handles renaming\n of columns.\n\n The initialization argument ``cols`` can be a list of ``Column`` objects\n or any structure that is valid for initializing a Python dict. 
This\n includes a dict, list of (key, val) tuples or [key, val] lists, etc.\n\n Parameters\n ----------\n cols : dict, list, tuple; optional\n Column objects as data structure that can init dict (see above)\n \"\"\"\n\n def __init__(self, cols={}):\n if isinstance(cols, (list, tuple)):\n # `cols` should be a list of two-tuples, but it is allowed to have\n # columns (BaseColumn or mixins) in the list.\n newcols = []\n for col in cols:\n if has_info_class(col, BaseColumnInfo):\n newcols.append((col.info.name, col))\n else:\n newcols.append(col)\n cols = newcols\n super().__init__(cols)\n\n def __getitem__(self, item):\n \"\"\"Get items from a TableColumns object.\n ::\n\n tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])\n tc['a'] # Column('a')\n tc[1] # Column('b')\n tc['a', 'b'] # <TableColumns names=('a', 'b')>\n tc[1:3] # <TableColumns names=('b', 'c')>\n \"\"\"\n if isinstance(item, str):\n return OrderedDict.__getitem__(self, item)\n elif isinstance(item, (int, np.integer)):\n return self.values()[item]\n elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):\n return self.values()[item.item()]\n elif isinstance(item, tuple):\n return self.__class__([self[x] for x in item])\n elif isinstance(item, slice):\n return self.__class__([self[x] for x in list(self)[item]])\n else:\n raise IndexError('Illegal key or index value for {} object'\n .format(self.__class__.__name__))\n\n def __setitem__(self, item, value, validated=False):\n \"\"\"\n Set item in this dict instance, but do not allow directly replacing an\n existing column unless it is already validated (and thus is certain to\n not corrupt the table).\n\n NOTE: it is easily possible to corrupt a table by directly *adding* a new\n key to the TableColumns attribute of a Table, e.g.\n ``t.columns['jane'] = 'doe'``.\n\n \"\"\"\n if item in self and not validated:\n raise ValueError(\"Cannot replace column '{}'. 
Use Table.replace_column() instead.\"\n .format(item))\n super().__setitem__(item, value)\n\n def __repr__(self):\n names = (f\"'{x}'\" for x in self.keys())\n return \"<{1} names=({0})>\".format(\",\".join(names), self.__class__.__name__)\n\n def _rename_column(self, name, new_name):\n if name == new_name:\n return\n\n if new_name in self:\n raise KeyError(f\"Column {new_name} already exists\")\n\n mapper = {name: new_name}\n new_names = [mapper.get(name, name) for name in self]\n cols = list(self.values())\n self.clear()\n self.update(list(zip(new_names, cols)))\n\n # Define keys and values for Python 2 and 3 source compatibility\n def keys(self):\n return list(OrderedDict.keys(self))\n\n def values(self):\n return list(OrderedDict.values(self))\n\n def isinstance(self, cls):\n \"\"\"\n Return a list of columns which are instances of the specified classes.\n\n Parameters\n ----------\n cls : class or tuple of classes\n Column class (including mixin) or tuple of Column classes.\n\n Returns\n -------\n col_list : list of Columns\n List of Column objects which are instances of given classes.\n \"\"\"\n cols = [col for col in self.values() if isinstance(col, cls)]\n return cols\n\n def not_isinstance(self, cls):\n \"\"\"\n Return a list of columns which are not instances of the specified classes.\n\n Parameters\n ----------\n cls : class or tuple of classes\n Column class (including mixin) or tuple of Column classes.\n\n Returns\n -------\n col_list : list of Columns\n List of Column objects which are not instances of given classes.\n \"\"\"\n cols = [col for col in self.values() if not isinstance(col, cls)]\n return cols\n\n\nclass TableReadWrite:\n def __get__(self, instance, owner_cls):\n if instance is None:\n # This is an unbound descriptor on the class\n info = self\n info._parent_cls = owner_cls\n else:\n info = instance.__dict__.get('info')\n if info is None:\n info = instance.__dict__['info'] = self.__class__(bound=True)\n info._parent = instance\n return info\n\n\nclass Table:\n \"\"\"A class to represent tables of heterogeneous data.\n\n `~astropy.table.Table` provides a class for heterogeneous tabular data.\n A key enhancement provided by the `~astropy.table.Table` class over\n e.g. a `numpy` structured array is the ability to easily modify the\n structure of the table by adding or removing columns, or adding new\n rows of data. In addition table and column metadata are fully supported.\n\n `~astropy.table.Table` differs from `~astropy.nddata.NDData` by the\n assumption that the input data consists of columns of homogeneous data,\n where each column has a unique identifier and may contain additional\n metadata such as the data unit, format, and description.\n\n See also: http://docs.astropy.org/en/stable/table/\n\n Parameters\n ----------\n data : numpy ndarray, dict, list, Table, or table-like object, optional\n Data to initialize table.\n masked : bool, optional\n Specify whether the table is masked.\n names : list, optional\n Specify column names.\n dtype : list, optional\n Specify column data types.\n meta : dict, optional\n Metadata associated with the table.\n copy : bool, optional\n Copy the input data. If the input is a Table the ``meta`` is always\n copied regardless of the ``copy`` parameter.\n Default is True.\n rows : numpy ndarray, list of lists, optional\n Row-oriented data for table instead of ``data`` argument.\n copy_indices : bool, optional\n Copy any indices in the input data. 
Default is True.\n units : list, dict, optional\n List or dict of units to apply to columns.\n descriptions : list, dict, optional\n List or dict of descriptions to apply to columns.\n **kwargs : dict, optional\n Additional keyword args when converting table-like object.\n \"\"\"\n\n meta = MetaData(copy=False)\n\n # Define class attributes for core container objects to allow for subclass\n # customization.\n Row = Row\n Column = Column\n MaskedColumn = MaskedColumn\n TableColumns = TableColumns\n TableFormatter = TableFormatter\n\n # Unified I/O read and write methods from .connect\n read = UnifiedReadWriteMethod(TableRead)\n write = UnifiedReadWriteMethod(TableWrite)\n\n def as_array(self, keep_byteorder=False, names=None):\n \"\"\"\n Return a new copy of the table in the form of a structured np.ndarray or\n np.ma.MaskedArray object (as appropriate).\n\n Parameters\n ----------\n keep_byteorder : bool, optional\n By default the returned array has all columns in native byte\n order. However, if this option is `True` this preserves the\n byte order of all columns (if any are non-native).\n\n names : list, optional:\n List of column names to include for returned structured array.\n Default is to include all table columns.\n\n Returns\n -------\n table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)\n Copy of table as a numpy structured array\n \"\"\"\n masked = self.masked or self.has_masked_columns or self.has_masked_values\n empty_init = ma.empty if masked else np.empty\n if len(self.columns) == 0:\n return empty_init(0, dtype=None)\n\n sys_byteorder = ('>', '<')[sys.byteorder == 'little']\n native_order = ('=', sys_byteorder)\n\n dtype = []\n\n cols = self.columns.values()\n\n if names is not None:\n cols = [col for col in cols if col.info.name in names]\n\n for col in cols:\n col_descr = descr(col)\n byteorder = col.info.dtype.byteorder\n\n if not keep_byteorder and byteorder not in native_order:\n new_dt = np.dtype(col_descr[1]).newbyteorder('=')\n col_descr = (col_descr[0], new_dt, col_descr[2])\n\n dtype.append(col_descr)\n\n data = empty_init(len(self), dtype=dtype)\n for col in cols:\n # When assigning from one array into a field of a structured array,\n # Numpy will automatically swap those columns to their destination\n # byte order where applicable\n data[col.info.name] = col\n\n # For masked out, masked mixin columns need to set output mask attribute.\n if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):\n data[col.info.name].mask = col.mask\n\n return data\n\n def __init__(self, data=None, masked=False, names=None, dtype=None,\n meta=None, copy=True, rows=None, copy_indices=True,\n units=None, descriptions=None,\n **kwargs):\n\n # Set up a placeholder empty table\n self._set_masked(masked)\n self.columns = self.TableColumns()\n self.formatter = self.TableFormatter()\n self._copy_indices = True # copy indices from this Table by default\n self._init_indices = copy_indices # whether to copy indices in init\n self.primary_key = None\n\n # Must copy if dtype are changing\n if not copy and dtype is not None:\n raise ValueError('Cannot specify dtype when copy=False')\n\n # Specifies list of names found for the case of initializing table with\n # a list of dict. If data are not list of dict then this is None.\n names_from_list_of_dict = None\n\n # Row-oriented input, e.g. list of lists or list of tuples, list of\n # dict, Row instance. 
Set data to something that the subsequent code\n # will parse correctly.\n if rows is not None:\n if data is not None:\n raise ValueError('Cannot supply both `data` and `rows` values')\n if isinstance(rows, types.GeneratorType):\n # Without this then the all(..) test below uses up the generator\n rows = list(rows)\n\n # Get column names if `rows` is a list of dict, otherwise this is None\n names_from_list_of_dict = _get_names_from_list_of_dict(rows)\n if names_from_list_of_dict:\n data = rows\n elif isinstance(rows, self.Row):\n data = rows\n else:\n data = list(zip(*rows))\n\n # Infer the type of the input data and set up the initialization\n # function, number of columns, and potentially the default col names\n\n default_names = None\n\n # Handle custom (subclass) table attributes that are stored in meta.\n # These are defined as class attributes using the MetaAttribute\n # descriptor. Any such attributes get removed from kwargs here.\n if kwargs:\n for attr in list(kwargs):\n descr = getattr(self.__class__, attr, None)\n if isinstance(descr, TableAttribute):\n setattr(self, attr, kwargs.pop(attr))\n\n if hasattr(data, '__astropy_table__'):\n # Data object implements the __astropy_table__ interface method.\n # Calling that method returns an appropriate instance of\n # self.__class__ and respects the `copy` arg. The returned\n # Table object should NOT then be copied.\n data = data.__astropy_table__(self.__class__, copy, **kwargs)\n copy = False\n elif kwargs:\n raise TypeError('__init__() got unexpected keyword argument {!r}'\n .format(list(kwargs.keys())[0]))\n\n if (isinstance(data, np.ndarray)\n and data.shape == (0,)\n and not data.dtype.names):\n data = None\n\n if isinstance(data, self.Row):\n data = data._table[data._index:data._index + 1]\n\n if isinstance(data, (list, tuple)):\n # Get column names from `data` if it is a list of dict, otherwise this is None.\n # This might be previously defined if `rows` was supplied as an init arg.\n names_from_list_of_dict = (names_from_list_of_dict\n or _get_names_from_list_of_dict(data))\n if names_from_list_of_dict:\n init_func = self._init_from_list_of_dicts\n n_cols = len(names_from_list_of_dict)\n else:\n init_func = self._init_from_list\n n_cols = len(data)\n\n elif isinstance(data, np.ndarray):\n if data.dtype.names:\n init_func = self._init_from_ndarray # _struct\n n_cols = len(data.dtype.names)\n default_names = data.dtype.names\n else:\n init_func = self._init_from_ndarray # _homog\n if data.shape == ():\n raise ValueError('Can not initialize a Table with a scalar')\n elif len(data.shape) == 1:\n data = data[np.newaxis, :]\n n_cols = data.shape[1]\n\n elif isinstance(data, Mapping):\n init_func = self._init_from_dict\n default_names = list(data)\n n_cols = len(default_names)\n\n elif isinstance(data, Table):\n # If user-input meta is None then use data.meta (if non-trivial)\n if meta is None and data.meta:\n # At this point do NOT deepcopy data.meta as this will happen after\n # table init_func() is called. But for table input the table meta\n # gets a key copy here if copy=False because later a direct object ref\n # is used.\n meta = data.meta if copy else data.meta.copy()\n\n # Handle indices on input table. 
Copy primary key and don't copy indices\n # if the input Table is in non-copy mode.\n self.primary_key = data.primary_key\n self._init_indices = self._init_indices and data._copy_indices\n\n # Extract default names, n_cols, and then overwrite ``data`` to be the\n # table columns so we can use _init_from_list.\n default_names = data.colnames\n n_cols = len(default_names)\n data = list(data.columns.values())\n\n init_func = self._init_from_list\n\n elif data is None:\n if names is None:\n if dtype is None:\n if meta is not None:\n self.meta = deepcopy(meta) if copy else meta\n return\n try:\n # No data nor names but dtype is available. This must be\n # valid to initialize a structured array.\n dtype = np.dtype(dtype)\n names = dtype.names\n dtype = [dtype[name] for name in names]\n except Exception:\n raise ValueError('dtype was specified but could not be '\n 'parsed for column names')\n # names is guaranteed to be set at this point\n init_func = self._init_from_list\n n_cols = len(names)\n data = [[]] * n_cols\n\n else:\n raise ValueError('Data type {} not allowed to init Table'\n .format(type(data)))\n\n # Set up defaults if names and/or dtype are not specified.\n # A value of None means the actual value will be inferred\n # within the appropriate initialization routine, either from\n # existing specification or auto-generated.\n\n if names is None:\n names = default_names or [None] * n_cols\n if dtype is None:\n dtype = [None] * n_cols\n\n # Numpy does not support bytes column names on Python 3, so fix them\n # up now.\n names = [fix_column_name(name) for name in names]\n\n self._check_names_dtype(names, dtype, n_cols)\n\n # Finally do the real initialization\n init_func(data, names, dtype, n_cols, copy)\n\n # Set table meta. If copy=True then deepcopy meta otherwise use the\n # user-supplied meta directly.\n if meta is not None:\n self.meta = deepcopy(meta) if copy else meta\n\n # Whatever happens above, the masked property should be set to a boolean\n if self.masked not in (None, True, False):\n raise TypeError(\"masked property must be None, True or False\")\n\n self._set_column_attribute('unit', units)\n self._set_column_attribute('description', descriptions)\n\n def _set_column_attribute(self, attr, values):\n \"\"\"Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column\n name) or a dict of name: value pairs. 
This is used for handling the ``units`` and\n ``descriptions`` kwargs to ``__init__``.\n \"\"\"\n if not values:\n return\n\n if isinstance(values, Row):\n # For a Row object transform to an equivalent dict.\n values = {name: values[name] for name in values.colnames}\n\n if not isinstance(values, dict):\n # If not a dict map, assume iterable and map to dict if the right length\n if len(values) != len(self.columns):\n raise ValueError(f'sequence of {attr} values must match number of columns')\n values = dict(zip(self.colnames, values))\n\n for name, value in values.items():\n if name not in self.columns:\n raise ValueError(f'invalid column name {name} for setting {attr} attribute')\n\n # Special case: ignore unit if it is an empty or blank string\n if attr == 'unit' and isinstance(value, str):\n if value.strip() == '':\n value = None\n\n if value not in (np.ma.masked, None):\n setattr(self[name].info, attr, value)\n\n def __getstate__(self):\n columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))\n for key, col in self.columns.items())\n return (columns, self.meta)\n\n def __setstate__(self, state):\n columns, meta = state\n self.__init__(columns, meta=meta)\n\n @property\n def mask(self):\n # Dynamic view of available masks\n if self.masked or self.has_masked_columns or self.has_masked_values:\n mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))\n for col in self.itercols()],\n names=self.colnames, copy=False)\n\n # Set hidden attribute to force inplace setitem so that code like\n # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.\n # See #5556 for discussion.\n mask_table._setitem_inplace = True\n else:\n mask_table = None\n\n return mask_table\n\n @mask.setter\n def mask(self, val):\n self.mask[:] = val\n\n @property\n def _mask(self):\n \"\"\"This is needed so that comparison of a masked Table and a\n MaskedArray works. The requirement comes from numpy.ma.core\n so don't remove this property.\"\"\"\n return self.as_array().mask\n\n def filled(self, fill_value=None):\n \"\"\"Return copy of self, with masked values filled.\n\n If input ``fill_value`` supplied then that value is used for all\n masked entries in the table. Otherwise the individual\n ``fill_value`` defined for each table column is used.\n\n Parameters\n ----------\n fill_value : str\n If supplied, this ``fill_value`` is used for all masked entries\n in the entire table.\n\n Returns\n -------\n filled_table : Table\n New table with masked values filled\n \"\"\"\n if self.masked or self.has_masked_columns or self.has_masked_values:\n # Get new columns with masked values filled, then create Table with those\n # new cols (copy=False) but deepcopy the meta.\n data = [col.filled(fill_value) if hasattr(col, 'filled') else col\n for col in self.itercols()]\n return self.__class__(data, meta=deepcopy(self.meta), copy=False)\n else:\n # Return copy of the original object.\n return self.copy()\n\n @property\n def indices(self):\n '''\n Return the indices associated with columns of the table\n as a TableIndices object.\n '''\n lst = []\n for column in self.columns.values():\n for index in column.info.indices:\n if sum([index is x for x in lst]) == 0: # ensure uniqueness\n lst.append(index)\n return TableIndices(lst)\n\n @property\n def loc(self):\n '''\n Return a TableLoc object that can be used for retrieving\n rows by index in a given data range. 
Note that both loc\n and iloc work only with single-column indices.\n '''\n return TableLoc(self)\n\n @property\n def loc_indices(self):\n \"\"\"\n Return a TableLocIndices object that can be used for retrieving\n the row indices corresponding to given table index key value or values.\n \"\"\"\n return TableLocIndices(self)\n\n @property\n def iloc(self):\n '''\n Return a TableILoc object that can be used for retrieving\n indexed rows in the order they appear in the index.\n '''\n return TableILoc(self)\n\n def add_index(self, colnames, engine=None, unique=False):\n '''\n Insert a new index among one or more columns.\n If there are no indices, make this index the\n primary table index.\n\n Parameters\n ----------\n colnames : str or list\n List of column names (or a single column name) to index\n engine : type or None\n Indexing engine class to use, from among SortedArray, BST,\n FastBST, FastRBT, and SCEngine. If the supplied argument is None\n (by default), use SortedArray.\n unique : bool\n Whether the values of the index must be unique. Default is False.\n '''\n if isinstance(colnames, str):\n colnames = (colnames,)\n columns = self.columns[tuple(colnames)].values()\n\n # make sure all columns support indexing\n for col in columns:\n if not getattr(col.info, '_supports_indexing', False):\n raise ValueError('Cannot create an index on column \"{}\", of '\n 'type \"{}\"'.format(col.info.name, type(col)))\n\n index = Index(columns, engine=engine, unique=unique)\n if not self.indices:\n self.primary_key = colnames\n for col in columns:\n col.info.indices.append(index)\n\n def remove_indices(self, colname):\n '''\n Remove all indices involving the given column.\n If the primary index is removed, the new primary\n index will be the most recently added remaining\n index.\n\n Parameters\n ----------\n colname : str\n Name of column\n '''\n col = self.columns[colname]\n for index in self.indices:\n try:\n index.col_position(col.info.name)\n except ValueError:\n pass\n else:\n for c in index.columns:\n c.info.indices.remove(index)\n\n def index_mode(self, mode):\n '''\n Return a context manager for an indexing mode.\n\n Parameters\n ----------\n mode : str\n Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.\n In 'discard_on_copy' mode,\n indices are not copied whenever columns or tables are copied.\n In 'freeze' mode, indices are not modified whenever columns are\n modified; at the exit of the context, indices refresh themselves\n based on column values. 
This mode is intended for scenarios in\n which one intends to make many additions or modifications in an\n indexed column.\n In 'copy_on_getitem' mode, indices are copied when taking column\n slices as well as table slices, so col[i0:i1] will preserve\n indices.\n '''\n return _IndexModeContext(self, mode)\n\n def __array__(self, dtype=None):\n \"\"\"Support converting Table to np.array via np.array(table).\n\n Coercion to a different dtype via np.array(table, dtype) is not\n supported and will raise a ValueError.\n \"\"\"\n if dtype is not None:\n raise ValueError('Datatype coercion is not allowed')\n\n # This limitation is because of the following unexpected result that\n # should have made a table copy while changing the column names.\n #\n # >>> d = astropy.table.Table([[1,2],[3,4]])\n # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])\n # array([(0, 0), (0, 0)],\n # dtype=[('a', '<i8'), ('b', '<i8')])\n\n out = self.as_array()\n return out.data if isinstance(out, np.ma.MaskedArray) else out\n\n def _check_names_dtype(self, names, dtype, n_cols):\n \"\"\"Make sure that names and dtype are both iterable and have\n the same length as data.\n \"\"\"\n for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):\n if not isiterable(inp_list):\n raise ValueError(f'{inp_str} must be a list or None')\n\n if len(names) != n_cols or len(dtype) != n_cols:\n raise ValueError(\n 'Arguments \"names\" and \"dtype\" must match number of columns'\n .format(inp_str))\n\n def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):\n \"\"\"Initialize table from a list of dictionaries representing rows.\"\"\"\n # Define placeholder for missing values as a unique object that cannot\n # every occur in user data.\n MISSING = object()\n\n # Gather column names that exist in the input `data`.\n names_from_data = set()\n for row in data:\n names_from_data.update(row)\n\n # Put names into a preferred order, either using the first row of data\n # if it is ordered, or alphabetically. Starting with Python 3.7, dict\n # is ordered so this test can be relaxed. (In practice CPython 3.6 is\n # this way, but not according to the formal spec).\n if (isinstance(data[0], OrderedDict)\n and set(data[0].keys()) == names_from_data):\n names_from_data = list(data[0].keys())\n else:\n names_from_data = sorted(names_from_data)\n\n # Note: if set(data[0].keys()) != names_from_data, this will give an\n # exception later, so NO need to catch here.\n\n # Convert list of dict into dict of list (cols), keep track of missing\n # indexes and put in MISSING placeholders in the `cols` lists.\n cols = {}\n missing_indexes = defaultdict(list)\n for name in names_from_data:\n cols[name] = []\n for ii, row in enumerate(data):\n try:\n val = row[name]\n except KeyError:\n missing_indexes[name].append(ii)\n val = MISSING\n cols[name].append(val)\n\n # Fill the missing entries with first values\n if missing_indexes:\n for name, indexes in missing_indexes.items():\n col = cols[name]\n first_val = next(val for val in col if val is not MISSING)\n for index in indexes:\n col[index] = first_val\n\n # prepare initialization\n if all(name is None for name in names):\n names = names_from_data\n\n self._init_from_dict(cols, names, dtype, n_cols, copy)\n\n # Mask the missing values if necessary, converting columns to MaskedColumn\n # as needed.\n if missing_indexes:\n for name, indexes in missing_indexes.items():\n col = self[name]\n # Ensure that any Column subclasses with MISSING values can support\n # setting masked values. 
As of astropy 4.0 the test condition below is\n # always True since _init_from_dict cannot result in mixin columns.\n if isinstance(col, Column) and not isinstance(col, MaskedColumn):\n self[name] = self.MaskedColumn(col, copy=False)\n\n # Finally do the masking in a mixin-safe way.\n self[name][indexes] = np.ma.masked\n return\n\n def _init_from_list(self, data, names, dtype, n_cols, copy):\n \"\"\"Initialize table from a list of column data. A column can be a\n Column object, np.ndarray, mixin, or any other iterable object.\n \"\"\"\n cols = []\n default_names = _auto_names(n_cols)\n\n for col, name, default_name, dtype in zip(data, names, default_names, dtype):\n col = self._convert_data_to_col(col, copy, default_name, dtype, name)\n\n cols.append(col)\n\n self._init_from_cols(cols)\n\n def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):\n \"\"\"\n Convert any allowed sequence data ``col`` to a column object that can be used\n directly in the self.columns dict. This could be a Column, MaskedColumn,\n or mixin column.\n\n The final column name is determined by::\n\n name or data.info.name or def_name\n\n If ``data`` has no ``info`` then ``name = name or def_name``.\n\n The behavior of ``copy`` for Column objects is:\n - copy=True: new class instance with a copy of data and deep copy of meta\n - copy=False: new class instance with same data and a key-only copy of meta\n\n For mixin columns:\n - copy=True: new class instance with copy of data and deep copy of meta\n - copy=False: original instance (no copy at all)\n\n Parameters\n ----------\n data : object (column-like sequence)\n Input column data\n copy : bool\n Make a copy\n default_name : str\n Default name\n dtype : np.dtype or None\n Data dtype\n name : str or None\n Column name\n\n Returns\n -------\n col : Column, MaskedColumn, mixin-column type\n Object that can be used as a column in self\n \"\"\"\n data_is_mixin = self._is_mixin_for_table(data)\n masked_col_cls = (self.ColumnClass\n if issubclass(self.ColumnClass, self.MaskedColumn)\n else self.MaskedColumn)\n try:\n data0_is_mixin = self._is_mixin_for_table(data[0])\n except Exception:\n # Need broad exception, cannot predict what data[0] raises for arbitrary data\n data0_is_mixin = False\n\n # Structured ndarray gets viewed as a mixin unless already a valid\n # mixin class\n if (not isinstance(data, Column) and not data_is_mixin\n and isinstance(data, np.ndarray) and len(data.dtype) > 1):\n data = data.view(NdarrayMixin)\n data_is_mixin = True\n\n # Get the final column name using precedence. Some objects may not\n # have an info attribute.\n if not name:\n if hasattr(data, 'info'):\n name = data.info.name or default_name\n else:\n name = default_name\n\n if isinstance(data, Column):\n # If self.ColumnClass is a subclass of col, then \"upgrade\" to ColumnClass,\n # otherwise just use the original class. The most common case is a\n # table with masked=True and ColumnClass=MaskedColumn. Then a Column\n # gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior\n # of downgrading from MaskedColumn to Column (for non-masked table)\n # does not happen.\n col_cls = self._get_col_cls_for_table(data)\n\n elif data_is_mixin:\n # Copy the mixin column attributes if they exist since the copy below\n # may not get this attribute.\n col = col_copy(data, copy_indices=self._init_indices) if copy else data\n col.info.name = name\n return col\n\n elif data0_is_mixin:\n # Handle case of a sequence of a mixin, e.g. 
[1*u.m, 2*u.m].\n try:\n col = data[0].__class__(data)\n col.info.name = name\n col.info.indices = []\n return col\n except Exception:\n # If that didn't work for some reason, just turn it into np.array of object\n data = np.array(data, dtype=object)\n col_cls = self.ColumnClass\n\n elif isinstance(data, np.ma.MaskedArray):\n # Require that col_cls be a subclass of MaskedColumn, remembering\n # that ColumnClass could be a user-defined subclass (though more-likely\n # could be MaskedColumn).\n col_cls = masked_col_cls\n\n elif not hasattr(data, 'dtype'):\n # If value doesn't have a dtype then convert to a masked numpy array.\n # Then check if there were any masked elements. This logic is handling\n # normal lists like [1, 2] but also odd-ball cases like a list of masked\n # arrays (see #8977). Use np.ma.array() to do the heavy lifting.\n try:\n np_data = np.ma.array(data, dtype=dtype)\n except Exception:\n # Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity\n np_data = np.ma.array(data, dtype=object)\n\n if np_data.ndim > 0 and len(np_data) == 0:\n # Implies input was an empty list (e.g. initializing an empty table\n # with pre-declared names and dtypes but no data). Here we need to\n # fall through to initializing with the original data=[].\n col_cls = self.ColumnClass\n else:\n if np_data.mask is np.ma.nomask:\n data = np_data.data\n col_cls = self.ColumnClass\n else:\n data = np_data\n col_cls = masked_col_cls\n copy = False\n\n else:\n # `data` is none of the above, so just go for it and try init'ing Column\n col_cls = self.ColumnClass\n\n try:\n col = col_cls(name=name, data=data, dtype=dtype,\n copy=copy, copy_indices=self._init_indices)\n except Exception:\n # Broad exception class since we don't know what might go wrong\n raise ValueError('unable to convert data to Column for Table')\n\n col = self._convert_col_for_table(col)\n\n return col\n\n def _init_from_ndarray(self, data, names, dtype, n_cols, copy):\n \"\"\"Initialize table from an ndarray structured array\"\"\"\n\n data_names = data.dtype.names or _auto_names(n_cols)\n struct = data.dtype.names is not None\n names = [name or data_names[i] for i, name in enumerate(names)]\n\n cols = ([data[name] for name in data_names] if struct else\n [data[:, i] for i in range(n_cols)])\n\n self._init_from_list(cols, names, dtype, n_cols, copy)\n\n def _init_from_dict(self, data, names, dtype, n_cols, copy):\n \"\"\"Initialize table from a dictionary of columns\"\"\"\n\n data_list = [data[name] for name in names]\n self._init_from_list(data_list, names, dtype, n_cols, copy)\n\n def _get_col_cls_for_table(self, col):\n \"\"\"Get the correct column class to use for upgrading any Column-like object.\n\n For a masked table, ensure any Column-like object is a subclass\n of the table MaskedColumn.\n\n For unmasked table, ensure any MaskedColumn-like object is a subclass\n of the table MaskedColumn. If not a MaskedColumn, then ensure that any\n Column-like object is a subclass of the table Column.\n \"\"\"\n\n col_cls = col.__class__\n\n if self.masked:\n if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):\n col_cls = self.MaskedColumn\n else:\n if isinstance(col, MaskedColumn):\n if not isinstance(col, self.MaskedColumn):\n col_cls = self.MaskedColumn\n elif isinstance(col, Column) and not isinstance(col, self.Column):\n col_cls = self.Column\n\n return col_cls\n\n def _convert_col_for_table(self, col):\n \"\"\"\n Make sure that all Column objects have correct base class for this type of\n Table. 
For a base Table this most commonly means setting to\n MaskedColumn if the table is masked. Table subclasses like QTable\n override this method.\n \"\"\"\n if isinstance(col, Column) and not isinstance(col, self.ColumnClass):\n col_cls = self._get_col_cls_for_table(col)\n if col_cls is not col.__class__:\n col = col_cls(col, copy=False)\n\n return col\n\n def _init_from_cols(self, cols):\n \"\"\"Initialize table from a list of Column or mixin objects\"\"\"\n\n lengths = set(len(col) for col in cols)\n if len(lengths) > 1:\n raise ValueError('Inconsistent data column lengths: {}'\n .format(lengths))\n\n # Make sure that all Column-based objects have correct class. For\n # plain Table this is self.ColumnClass, but for instance QTable will\n # convert columns with units to a Quantity mixin.\n newcols = [self._convert_col_for_table(col) for col in cols]\n self._make_table_from_cols(self, newcols)\n\n # Deduplicate indices. It may happen that after pickling or when\n # initing from an existing table that column indices which had been\n # references to a single index object got *copied* into an independent\n # object. This results in duplicates which will cause downstream problems.\n index_dict = {}\n for col in self.itercols():\n for i, index in enumerate(col.info.indices or []):\n names = tuple(ind_col.info.name for ind_col in index.columns)\n if names in index_dict:\n col.info.indices[i] = index_dict[names]\n else:\n index_dict[names] = index\n\n def _new_from_slice(self, slice_):\n \"\"\"Create a new table as a referenced slice from self.\"\"\"\n\n table = self.__class__(masked=self.masked)\n if self.meta:\n table.meta = self.meta.copy() # Shallow copy for slice\n table.primary_key = self.primary_key\n\n newcols = []\n for col in self.columns.values():\n newcol = col[slice_]\n\n # Note in line below, use direct attribute access to col.indices for Column\n # instances instead of the generic col.info.indices. This saves about 4 usec\n # per column.\n if (col if isinstance(col, Column) else col.info).indices:\n # TODO : as far as I can tell the only purpose of setting _copy_indices\n # here is to communicate that to the initial test in `slice_indices`.\n # Why isn't that just sent as an arg to the function?\n col.info._copy_indices = self._copy_indices\n newcol = col.info.slice_indices(newcol, slice_, len(col))\n\n # Don't understand why this is forcing a value on the original column.\n # Normally col.info does not even have a _copy_indices attribute. Tests\n # still pass if this line is deleted. (Each col.info attribute access\n # is expensive).\n col.info._copy_indices = True\n\n newcols.append(newcol)\n\n self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())\n return table\n\n @staticmethod\n def _make_table_from_cols(table, cols, verify=True, names=None):\n \"\"\"\n Make ``table`` in-place so that it represents the given list of ``cols``.\n \"\"\"\n if names is None:\n names = [col.info.name for col in cols]\n\n # Note: we do not test for len(names) == len(cols) if names is not None. In that\n # case the function is being called by from \"trusted\" source (e.g. right above here)\n # that is assumed to provide valid inputs. 
In that case verify=False.\n\n if verify:\n if None in names:\n raise TypeError('Cannot have None for column name')\n if len(set(names)) != len(names):\n raise ValueError('Duplicate column names')\n\n table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))\n\n for col in cols:\n table._set_col_parent_table_and_mask(col)\n\n def _set_col_parent_table_and_mask(self, col):\n \"\"\"\n Set ``col.parent_table = self`` and force ``col`` to have ``mask``\n attribute if the table is masked and ``col.mask`` does not exist.\n \"\"\"\n # For Column instances it is much faster to do direct attribute access\n # instead of going through .info\n col_info = col if isinstance(col, Column) else col.info\n col_info.parent_table = self\n\n # Legacy behavior for masked table\n if self.masked and not hasattr(col, 'mask'):\n col.mask = FalseArray(col.shape)\n\n def itercols(self):\n \"\"\"\n Iterate over the columns of this table.\n\n Examples\n --------\n\n To iterate over the columns of a table::\n\n >>> t = Table([[1], [2]])\n >>> for col in t.itercols():\n ... print(col)\n col0\n ----\n 1\n col1\n ----\n 2\n\n Using ``itercols()`` is similar to ``for col in t.columns.values()``\n but is syntactically preferred.\n \"\"\"\n for colname in self.columns:\n yield self[colname]\n\n def _base_repr_(self, html=False, descr_vals=None, max_width=None,\n tableid=None, show_dtype=True, max_lines=None,\n tableclass=None):\n if descr_vals is None:\n descr_vals = [self.__class__.__name__]\n if self.masked:\n descr_vals.append('masked=True')\n descr_vals.append('length={}'.format(len(self)))\n\n descr = ' '.join(descr_vals)\n if html:\n from astropy.utils.xml.writer import xml_escape\n descr = '<i>{}</i>\\n'.format(xml_escape(descr))\n else:\n descr = f'<{descr}>\\n'\n\n if tableid is None:\n tableid = 'table{id}'.format(id=id(self))\n\n data_lines, outs = self.formatter._pformat_table(\n self, tableid=tableid, html=html, max_width=max_width,\n show_name=True, show_unit=None, show_dtype=show_dtype,\n max_lines=max_lines, tableclass=tableclass)\n\n out = descr + '\\n'.join(data_lines)\n\n return out\n\n def _repr_html_(self):\n return self._base_repr_(html=True, max_width=-1,\n tableclass=conf.default_notebook_table_class)\n\n def __repr__(self):\n return self._base_repr_(html=False, max_width=None)\n\n def __str__(self):\n return '\\n'.join(self.pformat())\n\n def __bytes__(self):\n return str(self).encode('utf-8')\n\n @property\n def has_mixin_columns(self):\n \"\"\"\n True if table has any mixin columns (defined as columns that are not Column\n subclasses).\n \"\"\"\n return any(has_info_class(col, MixinInfo) for col in self.columns.values())\n\n @property\n def has_masked_columns(self):\n \"\"\"True if table has any ``MaskedColumn`` columns.\n\n This does not check for mixin columns that may have masked values, use the\n ``has_masked_values`` property in that case.\n\n \"\"\"\n return any(isinstance(col, MaskedColumn) for col in self.itercols())\n\n @property\n def has_masked_values(self):\n \"\"\"True if column in the table has values which are masked.\n\n This may be relatively slow for large tables as it requires checking the mask\n values of each column.\n \"\"\"\n for col in self.itercols():\n if hasattr(col, 'mask') and np.any(col.mask):\n return True\n else:\n return False\n\n def _is_mixin_for_table(self, col):\n \"\"\"\n Determine if ``col`` should be added to the table directly as\n a mixin column.\n \"\"\"\n if isinstance(col, BaseColumn):\n return False\n\n # Is it a mixin but not not 
Quantity (which gets converted to Column with\n # unit set).\n return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)\n\n @format_doc(_pprint_docs)\n def pprint(self, max_lines=None, max_width=None, show_name=True,\n show_unit=None, show_dtype=False, align=None):\n \"\"\"Print a formatted string representation of the table.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default is taken from the\n configuration item ``astropy.conf.max_lines``. If a negative\n value of ``max_lines`` is supplied then there is no line limit\n applied.\n\n The same applies for max_width except the configuration item is\n ``astropy.conf.max_width``.\n\n \"\"\"\n lines, outs = self.formatter._pformat_table(self, max_lines, max_width,\n show_name=show_name, show_unit=show_unit,\n show_dtype=show_dtype, align=align)\n if outs['show_length']:\n lines.append('Length = {} rows'.format(len(self)))\n\n n_header = outs['n_header']\n\n for i, line in enumerate(lines):\n if i < n_header:\n color_print(line, 'red')\n else:\n print(line)\n\n @format_doc(_pprint_docs)\n def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,\n show_unit=None, show_dtype=False, align=None):\n \"\"\"Print a formatted string representation of the entire table.\n\n This method is the same as `astropy.table.Table.pprint` except that\n the default ``max_lines`` and ``max_width`` are both -1 so that by\n default the entire table is printed instead of restricting to the size\n of the screen terminal.\n\n \"\"\"\n return self.pprint(max_lines, max_width, show_name,\n show_unit, show_dtype, align)\n\n def _make_index_row_display_table(self, index_row_name):\n if index_row_name not in self.columns:\n idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))\n return self.__class__([idx_col] + self.columns.values(),\n copy=False)\n else:\n return self\n\n def show_in_notebook(self, tableid=None, css=None, display_length=50,\n table_class='astropy-default', show_row_index='idx'):\n \"\"\"Render the table in HTML and show it in the IPython notebook.\n\n Parameters\n ----------\n tableid : str or `None`\n An html ID tag for the table. Default is ``table{id}-XXX``, where\n id is the unique integer id of the table object, id(self), and XXX\n is a random number to avoid conflicts when printing the same table\n multiple times.\n table_class : str or `None`\n A string with a list of HTML classes used to style the table.\n The special default string ('astropy-default') means that the string\n will be retrieved from the configuration item\n ``astropy.table.default_notebook_table_class``. Note that these\n table classes may make use of bootstrap, as this is loaded with the\n notebook. See `this page <https://getbootstrap.com/css/#tables>`_\n for the list of classes.\n css : str\n A valid CSS string declaring the formatting for the table. Defaults\n to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.\n display_length : int, optional\n Number or rows to show. Defaults to 50.\n show_row_index : str or False\n If this does not evaluate to False, a column with the given name\n will be added to the version of the table that gets displayed.\n This new column shows the index of the row in the table itself,\n even when the displayed table is re-sorted by another column. Note\n that if a column with this name already exists, this option will be\n ignored. 
Defaults to \"idx\".\n\n Notes\n -----\n Currently, unlike `show_in_browser` (with ``jsviewer=True``), this\n method needs to access online javascript code repositories. This is due\n to modern browsers' limitations on accessing local files. Hence, if you\n call this method while offline (and don't have a cached version of\n jquery and jquery.dataTables), you will not get the jsviewer features.\n \"\"\"\n\n from .jsviewer import JSViewer\n from IPython.display import HTML\n\n if tableid is None:\n tableid = 'table{}-{}'.format(id(self),\n np.random.randint(1, 1e6))\n\n jsv = JSViewer(display_length=display_length)\n if show_row_index:\n display_table = self._make_index_row_display_table(show_row_index)\n else:\n display_table = self\n if table_class == 'astropy-default':\n table_class = conf.default_notebook_table_class\n html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,\n max_lines=-1, show_dtype=False,\n tableclass=table_class)\n\n columns = display_table.columns.values()\n sortable_columns = [i for i, col in enumerate(columns)\n if col.info.dtype.kind in 'iufc']\n html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)\n return HTML(html)\n\n def show_in_browser(self, max_lines=5000, jsviewer=False,\n browser='default', jskwargs={'use_local_files': True},\n tableid=None, table_class=\"display compact\",\n css=None, show_row_index='idx'):\n \"\"\"Render the table in HTML and show it in a web browser.\n\n Parameters\n ----------\n max_lines : int\n Maximum number of rows to export to the table (set low by default\n to avoid memory issues, since the browser view requires duplicating\n the table in memory). A negative value of ``max_lines`` indicates\n no row limit.\n jsviewer : bool\n If `True`, prepends some javascript headers so that the table is\n rendered as a `DataTables <https://datatables.net>`_ data table.\n This allows in-browser searching & sorting.\n browser : str\n Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,\n ``'safari'`` (for mac, you may need to use ``'open -a\n \"/Applications/Google Chrome.app\" {}'`` for Chrome). If\n ``'default'``, will use the system default browser.\n jskwargs : dict\n Passed to the `astropy.table.JSViewer` init. Defaults to\n ``{'use_local_files': True}`` which means that the JavaScript\n libraries will be served from local copies.\n tableid : str or `None`\n An html ID tag for the table. Default is ``table{id}``, where id\n is the unique integer id of the table object, id(self).\n table_class : str or `None`\n A string with a list of HTML classes used to style the table.\n Default is \"display compact\", and other possible values can be\n found in https://www.datatables.net/manual/styling/classes\n css : str\n A valid CSS string declaring the formatting for the table. Defaults\n to ``astropy.table.jsviewer.DEFAULT_CSS``.\n show_row_index : str or False\n If this does not evaluate to False, a column with the given name\n will be added to the version of the table that gets displayed.\n This new column shows the index of the row in the table itself,\n even when the displayed table is re-sorted by another column. Note\n that if a column with this name already exists, this option will be\n ignored. 
Defaults to \"idx\".\n \"\"\"\n\n import os\n import webbrowser\n import tempfile\n from .jsviewer import DEFAULT_CSS\n from urllib.parse import urljoin\n from urllib.request import pathname2url\n\n if css is None:\n css = DEFAULT_CSS\n\n # We can't use NamedTemporaryFile here because it gets deleted as\n # soon as it gets garbage collected.\n tmpdir = tempfile.mkdtemp()\n path = os.path.join(tmpdir, 'table.html')\n\n with open(path, 'w') as tmp:\n if jsviewer:\n if show_row_index:\n display_table = self._make_index_row_display_table(show_row_index)\n else:\n display_table = self\n display_table.write(tmp, format='jsviewer', css=css,\n max_lines=max_lines, jskwargs=jskwargs,\n table_id=tableid, table_class=table_class)\n else:\n self.write(tmp, format='html')\n\n try:\n br = webbrowser.get(None if browser == 'default' else browser)\n except webbrowser.Error:\n log.error(f\"Browser '{browser}' not found.\")\n else:\n br.open(urljoin('file:', pathname2url(path)))\n\n @format_doc(_pformat_docs, id=\"{id}\")\n def pformat(self, max_lines=None, max_width=None, show_name=True,\n show_unit=None, show_dtype=False, html=False, tableid=None,\n align=None, tableclass=None):\n \"\"\"Return a list of lines for the formatted string representation of\n the table.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default is taken from the\n configuration item ``astropy.conf.max_lines``. If a negative\n value of ``max_lines`` is supplied then there is no line limit\n applied.\n\n The same applies for ``max_width`` except the configuration item is\n ``astropy.conf.max_width``.\n\n \"\"\"\n\n lines, outs = self.formatter._pformat_table(\n self, max_lines, max_width, show_name=show_name,\n show_unit=show_unit, show_dtype=show_dtype, html=html,\n tableid=tableid, tableclass=tableclass, align=align)\n\n if outs['show_length']:\n lines.append('Length = {} rows'.format(len(self)))\n\n return lines\n\n @format_doc(_pformat_docs, id=\"{id}\")\n def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,\n show_unit=None, show_dtype=False, html=False, tableid=None,\n align=None, tableclass=None):\n \"\"\"Return a list of lines for the formatted string representation of\n the entire table.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default is taken from the\n configuration item ``astropy.conf.max_lines``. If a negative\n value of ``max_lines`` is supplied then there is no line limit\n applied.\n\n The same applies for ``max_width`` except the configuration item is\n ``astropy.conf.max_width``.\n\n \"\"\"\n\n return self.pformat(max_lines, max_width, show_name,\n show_unit, show_dtype, html, tableid,\n align, tableclass)\n\n def more(self, max_lines=None, max_width=None, show_name=True,\n show_unit=None, show_dtype=False):\n \"\"\"Interactively browse table with a paging interface.\n\n Supported keys::\n\n f, <space> : forward one page\n b : back one page\n r : refresh same page\n n : next row\n p : previous row\n < : go to beginning\n > : go to end\n q : quit browsing\n h : print this help\n\n Parameters\n ----------\n max_lines : int\n Maximum number of lines in table output\n\n max_width : int or `None`\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. 
Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. Default is True.\n \"\"\"\n self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,\n show_unit=show_unit, show_dtype=show_dtype)\n\n def __getitem__(self, item):\n if isinstance(item, str):\n return self.columns[item]\n elif isinstance(item, (int, np.integer)):\n return self.Row(self, item)\n elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):\n return self.Row(self, item.item())\n elif self._is_list_or_tuple_of_str(item):\n out = self.__class__([self[x] for x in item],\n copy_indices=self._copy_indices)\n out._groups = groups.TableGroups(out, indices=self.groups._indices,\n keys=self.groups._keys)\n out.meta = self.meta.copy() # Shallow copy for meta\n return out\n elif ((isinstance(item, np.ndarray) and item.size == 0)\n or (isinstance(item, (tuple, list)) and not item)):\n # If item is an empty array/list/tuple then return the table with no rows\n return self._new_from_slice([])\n elif (isinstance(item, slice)\n or isinstance(item, np.ndarray)\n or isinstance(item, list)\n or isinstance(item, tuple) and all(isinstance(x, np.ndarray)\n for x in item)):\n # here for the many ways to give a slice; a tuple of ndarray\n # is produced by np.where, as in t[np.where(t['a'] > 2)]\n # For all, a new table is constructed with slice of all columns\n return self._new_from_slice(item)\n else:\n raise ValueError('Illegal type {} for table item access'\n .format(type(item)))\n\n def __setitem__(self, item, value):\n # If the item is a string then it must be the name of a column.\n # If that column doesn't already exist then create it now.\n if isinstance(item, str) and item not in self.colnames:\n self.add_column(value, name=item, copy=True)\n\n else:\n n_cols = len(self.columns)\n\n if isinstance(item, str):\n # Set an existing column by first trying to replace, and if\n # this fails do an in-place update. 
See definition of mask\n # property for discussion of the _setitem_inplace attribute.\n if (not getattr(self, '_setitem_inplace', False)\n and not conf.replace_inplace):\n try:\n self._replace_column_warnings(item, value)\n return\n except Exception:\n pass\n self.columns[item][:] = value\n\n elif isinstance(item, (int, np.integer)):\n self._set_row(idx=item, colnames=self.colnames, vals=value)\n\n elif (isinstance(item, slice)\n or isinstance(item, np.ndarray)\n or isinstance(item, list)\n or (isinstance(item, tuple) # output from np.where\n and all(isinstance(x, np.ndarray) for x in item))):\n\n if isinstance(value, Table):\n vals = (col for col in value.columns.values())\n\n elif isinstance(value, np.ndarray) and value.dtype.names:\n vals = (value[name] for name in value.dtype.names)\n\n elif np.isscalar(value):\n vals = itertools.repeat(value, n_cols)\n\n else: # Assume this is an iterable that will work\n if len(value) != n_cols:\n raise ValueError('Right side value needs {} elements (one for each column)'\n .format(n_cols))\n vals = value\n\n for col, val in zip(self.columns.values(), vals):\n col[item] = val\n\n else:\n raise ValueError('Illegal type {} for table item access'\n .format(type(item)))\n\n def __delitem__(self, item):\n if isinstance(item, str):\n self.remove_column(item)\n elif isinstance(item, (int, np.integer)):\n self.remove_row(item)\n elif (isinstance(item, (list, tuple, np.ndarray))\n and all(isinstance(x, str) for x in item)):\n self.remove_columns(item)\n elif (isinstance(item, (list, np.ndarray))\n and np.asarray(item).dtype.kind == 'i'):\n self.remove_rows(item)\n elif isinstance(item, slice):\n self.remove_rows(item)\n else:\n raise IndexError('illegal key or index value')\n\n def _ipython_key_completions_(self):\n return self.colnames\n\n def field(self, item):\n \"\"\"Return column[item] for recarray compatibility.\"\"\"\n return self.columns[item]\n\n @property\n def masked(self):\n return self._masked\n\n @masked.setter\n def masked(self, masked):\n raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'\n ' to convert to a masked table)')\n\n def _set_masked(self, masked):\n \"\"\"\n Set the table masked property.\n\n Parameters\n ----------\n masked : bool\n State of table masking (`True` or `False`)\n \"\"\"\n if masked in [True, False, None]:\n self._masked = masked\n else:\n raise ValueError(\"masked should be one of True, False, None\")\n\n self._column_class = self.MaskedColumn if self._masked else self.Column\n\n @property\n def ColumnClass(self):\n if self._column_class is None:\n return self.Column\n else:\n return self._column_class\n\n @property\n def dtype(self):\n return np.dtype([descr(col) for col in self.columns.values()])\n\n @property\n def colnames(self):\n return list(self.columns.keys())\n\n @staticmethod\n def _is_list_or_tuple_of_str(names):\n \"\"\"Check that ``names`` is a tuple or list of strings\"\"\"\n return (isinstance(names, (tuple, list)) and names\n and all(isinstance(x, str) for x in names))\n\n def keys(self):\n return list(self.columns.keys())\n\n def values(self):\n return self.columns.values()\n\n def items(self):\n return self.columns.items()\n\n def __len__(self):\n # For performance reasons (esp. in Row) cache the first column name\n # and use that subsequently for the table length. 
If might not be\n # available yet or the column might be gone now, in which case\n # try again in the except block.\n try:\n return len(OrderedDict.__getitem__(self.columns, self._first_colname))\n except (AttributeError, KeyError):\n if len(self.columns) == 0:\n return 0\n\n # Get the first column name\n self._first_colname = next(iter(self.columns))\n return len(self.columns[self._first_colname])\n\n def index_column(self, name):\n \"\"\"\n Return the positional index of column ``name``.\n\n Parameters\n ----------\n name : str\n column name\n\n Returns\n -------\n index : int\n Positional index of column ``name``.\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Get index of column 'b' of the table::\n\n >>> t.index_column('b')\n 1\n \"\"\"\n try:\n return self.colnames.index(name)\n except ValueError:\n raise ValueError(f\"Column {name} does not exist\")\n\n def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,\n default_name=None):\n \"\"\"\n Add a new column to the table using ``col`` as input. If ``index``\n is supplied then insert column before ``index`` position\n in the list of columns, otherwise append column to the end\n of the list.\n\n The ``col`` input can be any data object which is acceptable as a\n `~astropy.table.Table` column object or can be converted. This includes\n mixin columns and scalar or length=1 objects which get broadcast to match\n the table length.\n\n To add several columns at once use ``add_columns()`` or simply call\n ``add_column()`` for each one. There is very little performance difference\n in the two approaches.\n\n Parameters\n ----------\n col : object\n Data object for the new column\n index : int or `None`\n Insert column before this position or at end (default).\n name : str\n Column name\n rename_duplicate : bool\n Uniquify column name if it already exist. Default is False.\n copy : bool\n Make a copy of the new column. Default is True.\n default_name : str or `None`\n Name to use if both ``name`` and ``col.info.name`` are not available.\n Defaults to ``col{number_of_columns}``.\n\n Examples\n --------\n Create a table with two columns 'a' and 'b', then create a third column 'c'\n and append it to the end of the table::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> col_c = Column(name='c', data=['x', 'y'])\n >>> t.add_column(col_c)\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n\n Add column 'd' at position 1. Note that the column is inserted\n before the given index::\n\n >>> t.add_column(['a', 'b'], name='d', index=1)\n >>> print(t)\n a d b c\n --- --- --- ---\n 1 a 0.1 x\n 2 b 0.2 y\n\n Add second column named 'b' with rename_duplicate::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> t.add_column(1.1, name='b', rename_duplicate=True)\n >>> print(t)\n a b b_1\n --- --- ---\n 1 0.1 1.1\n 2 0.2 1.1\n\n Add an unnamed column or mixin object in the table using a default name\n or by specifying an explicit name with ``name``. 
Name can also be overridden::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> t.add_column(['a', 'b'])\n >>> t.add_column(col_c, name='d')\n >>> print(t)\n a b col2 d\n --- --- ---- ---\n 1 0.1 a x\n 2 0.2 b y\n \"\"\"\n if default_name is None:\n default_name = 'col{}'.format(len(self.columns))\n\n # Convert col data to acceptable object for insertion into self.columns.\n # Note that along with the lines above and below, this allows broadcasting\n # of scalars to the correct shape for adding to table.\n col = self._convert_data_to_col(col, name=name, copy=copy,\n default_name=default_name)\n\n # Make col data shape correct for scalars. The second test is to allow\n # broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].\n if (col.shape == () or col.shape[0] == 1) and len(self) > 0:\n new_shape = (len(self),) + getattr(col, 'shape', ())[1:]\n if isinstance(col, np.ndarray):\n col = np.broadcast_to(col, shape=new_shape,\n subok=True)\n elif isinstance(col, ShapedLikeNDArray):\n col = col._apply(np.broadcast_to, shape=new_shape,\n subok=True)\n\n # broadcast_to() results in a read-only array. Apparently it only changes\n # the view to look like the broadcasted array. So copy.\n col = col_copy(col)\n\n name = col.info.name\n\n # Ensure that new column is the right length\n if len(self.columns) > 0 and len(col) != len(self):\n raise ValueError('Inconsistent data column lengths')\n\n if rename_duplicate:\n orig_name = name\n i = 1\n while name in self.columns:\n # Iterate until a unique name is found\n name = orig_name + '_' + str(i)\n i += 1\n col.info.name = name\n\n # Set col parent_table weakref and ensure col has mask attribute if table.masked\n self._set_col_parent_table_and_mask(col)\n\n # Add new column as last column\n self.columns[name] = col\n\n if index is not None:\n # Move the other cols to the right of the new one\n move_names = self.colnames[index:-1]\n for move_name in move_names:\n self.columns.move_to_end(move_name, last=True)\n\n def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):\n \"\"\"\n Add a list of new columns the table using ``cols`` data objects. If a\n corresponding list of ``indexes`` is supplied then insert column\n before each ``index`` position in the *original* list of columns,\n otherwise append columns to the end of the list.\n\n The ``cols`` input can include any data objects which are acceptable as\n `~astropy.table.Table` column objects or can be converted. This includes\n mixin columns and scalar or length=1 objects which get broadcast to match\n the table length.\n\n From a performance perspective there is little difference between calling\n this method once or looping over the new columns and calling ``add_column()``\n for each column.\n\n Parameters\n ----------\n cols : list of objects\n List of data objects for the new columns\n indexes : list of ints or `None`\n Insert column before this position or at end (default).\n names : list of str\n Column names\n copy : bool\n Make a copy of the new columns. 
Default is True.\n rename_duplicate : bool\n Uniquify new column names if they duplicate the existing ones.\n Default is False.\n\n\n Examples\n --------\n Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'\n and append them to the end of the table::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> col_c = Column(name='c', data=['x', 'y'])\n >>> col_d = Column(name='d', data=['u', 'v'])\n >>> t.add_columns([col_c, col_d])\n >>> print(t)\n a b c d\n --- --- --- ---\n 1 0.1 x u\n 2 0.2 y v\n\n Add column 'c' at position 0 and column 'd' at position 1. Note that\n the columns are inserted before the given position::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],\n ... indexes=[0, 1])\n >>> print(t)\n c a d b\n --- --- --- ---\n x 1 u 0.1\n y 2 v 0.2\n\n Add second column 'b' and column 'c' with ``rename_duplicate``::\n\n >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))\n >>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),\n ... rename_duplicate=True)\n >>> print(t)\n a b b_1 c\n --- --- --- ---\n 1 0.1 1.1 x\n 2 0.2 1.2 y\n\n Add unnamed columns or mixin objects in the table using default names\n or by specifying explicit names with ``names``. Names can also be overridden::\n\n >>> t = Table()\n >>> col_b = Column(name='b', data=['u', 'v'])\n >>> t.add_columns([[1, 2], col_b])\n >>> t.add_columns([[3, 4], col_b], names=['c', 'd'])\n >>> print(t)\n col0 b c d\n ---- --- --- ---\n 1 u 3 u\n 2 v 4 v\n \"\"\"\n if indexes is None:\n indexes = [len(self.columns)] * len(cols)\n elif len(indexes) != len(cols):\n raise ValueError('Number of indexes must match number of cols')\n\n if names is None:\n names = (None,) * len(cols)\n elif len(names) != len(cols):\n raise ValueError('Number of names must match number of cols')\n\n default_names = ['col{}'.format(ii + len(self.columns))\n for ii in range(len(cols))]\n\n for ii in reversed(np.argsort(indexes)):\n self.add_column(cols[ii], index=indexes[ii], name=names[ii],\n default_name=default_names[ii],\n rename_duplicate=rename_duplicate, copy=copy)\n\n def _replace_column_warnings(self, name, col):\n \"\"\"\n Same as replace_column but issues warnings under various circumstances.\n \"\"\"\n warns = conf.replace_warnings\n\n if 'refcount' in warns and name in self.colnames:\n refcount = sys.getrefcount(self[name])\n\n if name in self.colnames:\n old_col = self[name]\n\n # This may raise an exception (e.g. t['a'] = 1) in which case none of\n # the downstream code runs.\n self.replace_column(name, col)\n\n if 'always' in warns:\n warnings.warn(f\"replaced column '{name}'\",\n TableReplaceWarning, stacklevel=3)\n\n if 'slice' in warns:\n try:\n # Check for ndarray-subclass slice. An unsliced instance\n # has an ndarray for the base while sliced has the same class\n # as parent.\n if isinstance(old_col.base, old_col.__class__):\n msg = (\"replaced column '{}' which looks like an array slice. 
\"\n \"The new column no longer shares memory with the \"\n \"original array.\".format(name))\n warnings.warn(msg, TableReplaceWarning, stacklevel=3)\n except AttributeError:\n pass\n\n if 'refcount' in warns:\n # Did reference count change?\n new_refcount = sys.getrefcount(self[name])\n if refcount != new_refcount:\n msg = (\"replaced column '{}' and the number of references \"\n \"to the column changed.\".format(name))\n warnings.warn(msg, TableReplaceWarning, stacklevel=3)\n\n if 'attributes' in warns:\n # Any of the standard column attributes changed?\n changed_attrs = []\n new_col = self[name]\n # Check base DataInfo attributes that any column will have\n for attr in DataInfo.attr_names:\n if getattr(old_col.info, attr) != getattr(new_col.info, attr):\n changed_attrs.append(attr)\n\n if changed_attrs:\n msg = (\"replaced column '{}' and column attributes {} changed.\"\n .format(name, changed_attrs))\n warnings.warn(msg, TableReplaceWarning, stacklevel=3)\n\n def replace_column(self, name, col, copy=True):\n \"\"\"\n Replace column ``name`` with the new ``col`` object.\n\n The behavior of ``copy`` for Column objects is:\n - copy=True: new class instance with a copy of data and deep copy of meta\n - copy=False: new class instance with same data and a key-only copy of meta\n\n For mixin columns:\n - copy=True: new class instance with copy of data and deep copy of meta\n - copy=False: original instance (no copy at all)\n\n Parameters\n ----------\n name : str\n Name of column to replace\n col : column object (list, ndarray, Column, etc)\n New column object to replace the existing column\n copy : bool\n Make copy of the input ``col``, default=True\n\n Examples\n --------\n Replace column 'a' with a float version of itself::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))\n >>> float_a = t['a'].astype(float)\n >>> t.replace_column('a', float_a)\n \"\"\"\n if name not in self.colnames:\n raise ValueError(f'column name {name} is not in the table')\n\n if self[name].info.indices:\n raise ValueError('cannot replace a table index column')\n\n col = self._convert_data_to_col(col, name=name, copy=copy)\n self._set_col_parent_table_and_mask(col)\n\n # Ensure that new column is the right length, unless it is the only column\n # in which case re-sizing is allowed.\n if len(self.columns) > 1 and len(col) != len(self[name]):\n raise ValueError('length of new column must match table length')\n\n self.columns.__setitem__(name, col, validated=True)\n\n def remove_row(self, index):\n \"\"\"\n Remove a row from the table.\n\n Parameters\n ----------\n index : int\n Index of row to remove\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... 
names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Remove row 1 from the table::\n\n >>> t.remove_row(1)\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 3 0.3 z\n\n To remove several rows at the same time use remove_rows.\n \"\"\"\n # check the index against the types that work with np.delete\n if not isinstance(index, (int, np.integer)):\n raise TypeError(\"Row index must be an integer\")\n self.remove_rows(index)\n\n def remove_rows(self, row_specifier):\n \"\"\"\n Remove rows from the table.\n\n Parameters\n ----------\n row_specifier : slice, int, or array of ints\n Specification for rows to remove\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Remove rows 0 and 2 from the table::\n\n >>> t.remove_rows([0, 2])\n >>> print(t)\n a b c\n --- --- ---\n 2 0.2 y\n\n\n Note that there are no warnings if the slice operator extends\n outside the data::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> t.remove_rows(slice(10, 20, 1))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n \"\"\"\n # Update indices\n for index in self.indices:\n index.remove_rows(row_specifier)\n\n keep_mask = np.ones(len(self), dtype=bool)\n keep_mask[row_specifier] = False\n\n columns = self.TableColumns()\n for name, col in self.columns.items():\n newcol = col[keep_mask]\n newcol.info.parent_table = self\n columns[name] = newcol\n\n self._replace_cols(columns)\n\n # Revert groups to default (ungrouped) state\n if hasattr(self, '_groups'):\n del self._groups\n\n def iterrows(self, *names):\n \"\"\"\n Iterate over rows of table returning a tuple of values for each row.\n\n This method is especially useful when only a subset of columns are needed.\n\n The ``iterrows`` method can be substantially faster than using the standard\n Table row iteration (e.g. ``for row in tbl:``), since that returns a new\n ``~astropy.table.Row`` object for each row and accessing a column in that\n row (e.g. ``row['col0']``) is slower than tuple access.\n\n Parameters\n ----------\n names : list\n List of column names (default to all columns if no names provided)\n\n Returns\n -------\n rows : iterator returning tuples of row values\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table({'a': [1, 2, 3],\n ... 'b': [1.0, 2.5, 3.0],\n ... 'c': ['x', 'y', 'z']})\n\n To iterate row-wise using column names::\n\n >>> for a, c in t.iterrows('a', 'c'):\n ... print(a, c)\n 1 x\n 2 y\n 3 z\n\n \"\"\"\n if len(names) == 0:\n names = self.colnames\n else:\n for name in names:\n if name not in self.colnames:\n raise ValueError(f'{name} is not a valid column name')\n\n cols = (self[name] for name in names)\n out = zip(*cols)\n return out\n\n def remove_column(self, name):\n \"\"\"\n Remove a column from the table.\n\n This can also be done with::\n\n del table[name]\n\n Parameters\n ----------\n name : str\n Name of column to remove\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... 
names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Remove column 'b' from the table::\n\n >>> t.remove_column('b')\n >>> print(t)\n a c\n --- ---\n 1 x\n 2 y\n 3 z\n\n To remove several columns at the same time use remove_columns.\n \"\"\"\n\n self.remove_columns([name])\n\n def remove_columns(self, names):\n '''\n Remove several columns from the table.\n\n Parameters\n ----------\n names : list\n A list containing the names of the columns to remove\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Remove columns 'b' and 'c' from the table::\n\n >>> t.remove_columns(['b', 'c'])\n >>> print(t)\n a\n ---\n 1\n 2\n 3\n\n Specifying only a single column also works. Remove column 'b' from the table::\n\n >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> t.remove_columns('b')\n >>> print(t)\n a c\n --- ---\n 1 x\n 2 y\n 3 z\n\n This gives the same as using remove_column.\n '''\n if isinstance(names, str):\n names = [names]\n\n for name in names:\n if name not in self.columns:\n raise KeyError(f\"Column {name} does not exist\")\n\n for name in names:\n self.columns.pop(name)\n\n def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):\n \"\"\"\n Convert string-like columns to/from bytestring and unicode (internal only).\n\n Parameters\n ----------\n in_kind : str\n Input dtype.kind\n out_kind : str\n Output dtype.kind\n \"\"\"\n\n for col in self.itercols():\n if col.dtype.kind == in_kind:\n try:\n # This requires ASCII and is faster by a factor of up to ~8, so\n # try that first.\n newcol = col.__class__(col, dtype=out_kind)\n except (UnicodeEncodeError, UnicodeDecodeError):\n newcol = col.__class__(encode_decode_func(col, 'utf-8'))\n\n # Quasi-manually copy info attributes. Unfortunately\n # DataInfo.__set__ does not do the right thing in this case\n # so newcol.info = col.info does not get the old info attributes.\n for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):\n value = deepcopy(getattr(col.info, attr))\n setattr(newcol.info, attr, value)\n\n self[col.name] = newcol\n\n def convert_bytestring_to_unicode(self):\n \"\"\"\n Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')\n using UTF-8 encoding.\n\n Internally this changes string columns to represent each character\n in the string with a 4-byte UCS-4 equivalent, so it is inefficient\n for memory but allows scripts to manipulate string arrays with\n natural syntax.\n \"\"\"\n self._convert_string_dtype('S', 'U', np.char.decode)\n\n def convert_unicode_to_bytestring(self):\n \"\"\"\n Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')\n using UTF-8 encoding.\n\n When exporting a unicode string array to a file, it may be desirable\n to encode unicode columns as bytestrings.\n \"\"\"\n self._convert_string_dtype('U', 'S', np.char.encode)\n\n def keep_columns(self, names):\n '''\n Keep only the columns specified (remove the others).\n\n Parameters\n ----------\n names : list\n A list containing the names of the columns to keep. All other\n columns will be removed.\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],\n ... 
names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 0.1 x\n 2 0.2 y\n 3 0.3 z\n\n Specifying only a single column name keeps only this column.\n Keep only column 'a' of the table::\n\n >>> t.keep_columns('a')\n >>> print(t)\n a\n ---\n 1\n 2\n 3\n\n Specifying a list of column names is keeps is also possible.\n Keep columns 'a' and 'c' of the table::\n\n >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],\n ... names=('a', 'b', 'c'))\n >>> t.keep_columns(['a', 'c'])\n >>> print(t)\n a c\n --- ---\n 1 x\n 2 y\n 3 z\n '''\n\n if isinstance(names, str):\n names = [names]\n\n for name in names:\n if name not in self.columns:\n raise KeyError(f\"Column {name} does not exist\")\n\n remove = list(set(self.keys()) - set(names))\n\n self.remove_columns(remove)\n\n def rename_column(self, name, new_name):\n '''\n Rename a column.\n\n This can also be done directly with by setting the ``name`` attribute\n for a column::\n\n table[name].name = new_name\n\n TODO: this won't work for mixins\n\n Parameters\n ----------\n name : str\n The current name of the column.\n new_name : str\n The new name for the column\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 3 5\n 2 4 6\n\n Renaming column 'a' to 'aa'::\n\n >>> t.rename_column('a' , 'aa')\n >>> print(t)\n aa b c\n --- --- ---\n 1 3 5\n 2 4 6\n '''\n\n if name not in self.keys():\n raise KeyError(f\"Column {name} does not exist\")\n\n self.columns[name].info.name = new_name\n\n def rename_columns(self, names, new_names):\n '''\n Rename multiple columns.\n\n Parameters\n ----------\n names : list, tuple\n A list or tuple of existing column names.\n new_names : list, tuple\n A list or tuple of new column names.\n\n Examples\n --------\n Create a table with three columns 'a', 'b', 'c'::\n\n >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 3 5\n 2 4 6\n\n Renaming columns 'a' to 'aa' and 'b' to 'bb'::\n\n >>> names = ('a','b')\n >>> new_names = ('aa','bb')\n >>> t.rename_columns(names, new_names)\n >>> print(t)\n aa bb c\n --- --- ---\n 1 3 5\n 2 4 6\n '''\n\n if not self._is_list_or_tuple_of_str(names):\n raise TypeError(\"input 'names' must be a tuple or a list of column names\")\n\n if not self._is_list_or_tuple_of_str(new_names):\n raise TypeError(\"input 'new_names' must be a tuple or a list of column names\")\n\n if len(names) != len(new_names):\n raise ValueError(\"input 'names' and 'new_names' list arguments must be the same length\")\n\n for name, new_name in zip(names, new_names):\n self.rename_column(name, new_name)\n\n def _set_row(self, idx, colnames, vals):\n try:\n assert len(vals) == len(colnames)\n except Exception:\n raise ValueError('right hand side must be a sequence of values with '\n 'the same length as the number of selected columns')\n\n # Keep track of original values before setting each column so that\n # setting row can be transactional.\n orig_vals = []\n cols = self.columns\n try:\n for name, val in zip(colnames, vals):\n orig_vals.append(cols[name][idx])\n cols[name][idx] = val\n except Exception:\n # If anything went wrong first revert the row update then raise\n for name, val in zip(colnames, orig_vals[:-1]):\n cols[name][idx] = val\n raise\n\n def add_row(self, vals=None, mask=None):\n \"\"\"Add a new row to the end of the table.\n\n The ``vals`` argument can be:\n\n sequence (e.g. 
tuple or list)\n Column values in the same order as table columns.\n mapping (e.g. dict)\n Keys corresponding to column names. Missing values will be\n filled with np.zeros for the column dtype.\n `None`\n All values filled with np.zeros for the column dtype.\n\n This method requires that the Table object \"owns\" the underlying array\n data. In particular one cannot add a row to a Table that was\n initialized with copy=False from an existing array.\n\n The ``mask`` attribute should give (if desired) the mask for the\n values. The type of the mask should match that of the values, i.e. if\n ``vals`` is an iterable, then ``mask`` should also be an iterable\n with the same length, and if ``vals`` is a mapping, then ``mask``\n should be a dictionary.\n\n Parameters\n ----------\n vals : tuple, list, dict or `None`\n Use the specified values in the new row\n mask : tuple, list, dict or `None`\n Use the specified mask values in the new row\n\n Examples\n --------\n Create a table with three columns 'a', 'b' and 'c'::\n\n >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))\n >>> print(t)\n a b c\n --- --- ---\n 1 4 7\n 2 5 8\n\n Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::\n\n >>> t.add_row([3,6,9])\n >>> print(t)\n a b c\n --- --- ---\n 1 4 7\n 2 5 8\n 3 6 9\n \"\"\"\n self.insert_row(len(self), vals, mask)\n\n def insert_row(self, index, vals=None, mask=None):\n \"\"\"Add a new row before the given ``index`` position in the table.\n\n The ``vals`` argument can be:\n\n sequence (e.g. tuple or list)\n Column values in the same order as table columns.\n mapping (e.g. dict)\n Keys corresponding to column names. Missing values will be\n filled with np.zeros for the column dtype.\n `None`\n All values filled with np.zeros for the column dtype.\n\n The ``mask`` attribute should give (if desired) the mask for the\n values. The type of the mask should match that of the values, i.e. 
if\n ``vals`` is an iterable, then ``mask`` should also be an iterable\n with the same length, and if ``vals`` is a mapping, then ``mask``\n should be a dictionary.\n\n Parameters\n ----------\n vals : tuple, list, dict or `None`\n Use the specified values in the new row\n mask : tuple, list, dict or `None`\n Use the specified mask values in the new row\n \"\"\"\n colnames = self.colnames\n\n N = len(self)\n if index < -N or index > N:\n raise IndexError(\"Index {} is out of bounds for table with length {}\"\n .format(index, N))\n if index < 0:\n index += N\n\n def _is_mapping(obj):\n \"\"\"Minimal checker for mapping (dict-like) interface for obj\"\"\"\n attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')\n return all(hasattr(obj, attr) for attr in attrs)\n\n if _is_mapping(vals) or vals is None:\n # From the vals and/or mask mappings create the corresponding lists\n # that have entries for each table column.\n if mask is not None and not _is_mapping(mask):\n raise TypeError(\"Mismatch between type of vals and mask\")\n\n # Now check that the mask is specified for the same keys as the\n # values, otherwise things get really confusing.\n if mask is not None and set(vals.keys()) != set(mask.keys()):\n raise ValueError('keys in mask should match keys in vals')\n\n if vals and any(name not in colnames for name in vals):\n raise ValueError('Keys in vals must all be valid column names')\n\n vals_list = []\n mask_list = []\n\n for name in colnames:\n if vals and name in vals:\n vals_list.append(vals[name])\n mask_list.append(False if mask is None else mask[name])\n else:\n col = self[name]\n if hasattr(col, 'dtype'):\n # Make a placeholder zero element of the right type which is masked.\n # This assumes the appropriate insert() method will broadcast a\n # numpy scalar to the right shape.\n vals_list.append(np.zeros(shape=(), dtype=col.dtype))\n\n # For masked table any unsupplied values are masked by default.\n mask_list.append(self.masked and vals is not None)\n else:\n raise ValueError(f\"Value must be supplied for column '{name}'\")\n\n vals = vals_list\n mask = mask_list\n\n if isiterable(vals):\n if mask is not None and (not isiterable(mask) or _is_mapping(mask)):\n raise TypeError(\"Mismatch between type of vals and mask\")\n\n if len(self.columns) != len(vals):\n raise ValueError('Mismatch between number of vals and columns')\n\n if mask is not None:\n if len(self.columns) != len(mask):\n raise ValueError('Mismatch between number of masks and columns')\n else:\n mask = [False] * len(self.columns)\n\n else:\n raise TypeError('Vals must be an iterable or mapping or None')\n\n columns = self.TableColumns()\n try:\n # Insert val at index for each column\n for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):\n # If new val is masked and the existing column does not support masking\n # then upgrade the column to a mask-enabled type: either the table-level\n # default ColumnClass or else MaskedColumn.\n if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):\n col_cls = (self.ColumnClass\n if issubclass(self.ColumnClass, self.MaskedColumn)\n else self.MaskedColumn)\n col = col_cls(col, copy=False)\n\n newcol = col.insert(index, val, axis=0)\n\n if len(newcol) != N + 1:\n raise ValueError('Incorrect length for column {} after inserting {}'\n ' (expected {}, got {})'\n .format(name, val, len(newcol), N + 1))\n newcol.info.parent_table = self\n\n # Set mask if needed and possible\n if mask_:\n if hasattr(newcol, 'mask'):\n 
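# Flag the newly inserted entry as masked (either requested via ``mask``\n # or an unsupplied value in a masked table).\n 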
newcol[index] = np.ma.masked\n else:\n raise TypeError(\"mask was supplied for column '{}' but it does not \"\n \"support masked values\".format(col.info.name))\n\n columns[name] = newcol\n\n # insert row in indices\n for table_index in self.indices:\n table_index.insert_row(index, vals, self.columns.values())\n\n except Exception as err:\n raise ValueError(\"Unable to insert row because of exception in column '{}':\\n{}\"\n .format(name, err))\n else:\n self._replace_cols(columns)\n\n # Revert groups to default (ungrouped) state\n if hasattr(self, '_groups'):\n del self._groups\n\n def _replace_cols(self, columns):\n for col, new_col in zip(self.columns.values(), columns.values()):\n new_col.info.indices = []\n for index in col.info.indices:\n index.columns[index.col_position(col.info.name)] = new_col\n new_col.info.indices.append(index)\n\n self.columns = columns\n\n def argsort(self, keys=None, kind=None, reverse=False):\n \"\"\"\n Return the indices which would sort the table according to one or\n more key columns. This simply calls the `numpy.argsort` function on\n the table with the ``order`` parameter set to ``keys``.\n\n Parameters\n ----------\n keys : str or list of str\n The column name(s) to order the table by\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n Sorting algorithm.\n reverse : bool\n Sort in reverse order (default=False)\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sorts the table by the specified key\n column(s).\n \"\"\"\n if isinstance(keys, str):\n keys = [keys]\n\n # use index sorted order if possible\n if keys is not None:\n index = get_index(self, names=keys)\n if index is not None:\n idx = np.asarray(index.sorted_data())\n return idx[::-1] if reverse else idx\n\n kwargs = {}\n if keys:\n # For multiple keys return a structured array which gets sorted,\n # while for a single key return a single ndarray. Sorting a\n # one-column structured array is much slower than ndarray, e.g. a\n # factor of ~6 for a 10 million long random array.\n if len(keys) > 1:\n kwargs['order'] = keys\n data = self.as_array(names=keys)\n else:\n data = self[keys[0]].view(np.ndarray)\n else:\n # No keys provided so sort on all columns.\n data = self.as_array()\n\n if kind:\n kwargs['kind'] = kind\n\n idx = data.argsort(**kwargs)\n\n return idx[::-1] if reverse else idx\n\n def sort(self, keys=None, reverse=False):\n '''\n Sort the table according to one or more keys. This operates\n on the existing table and does not return a new table.\n\n Parameters\n ----------\n keys : str or list of str\n The key(s) to order the table by. If None, use the\n primary index of the Table.\n\n reverse : bool\n Sort in reverse order (default=False)\n\n Examples\n --------\n Create a table with 3 columns::\n\n >>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],\n ... 
[12, 15, 18]], names=('firstname', 'name', 'tel'))\n >>> print(t)\n firstname name tel\n --------- ------- ---\n Max Miller 12\n Jo Miller 15\n John Jackson 18\n\n Sorting according to standard sorting rules, first 'name' then 'firstname'::\n\n >>> t.sort(['name', 'firstname'])\n >>> print(t)\n firstname name tel\n --------- ------- ---\n John Jackson 18\n Jo Miller 15\n Max Miller 12\n\n Sorting according to standard sorting rules, first 'firstname' then 'tel',\n in reverse order::\n\n >>> t.sort(['firstname', 'tel'], reverse=True)\n >>> print(t)\n firstname name tel\n --------- ------- ---\n Max Miller 12\n John Jackson 18\n Jo Miller 15\n '''\n if keys is None:\n if not self.indices:\n raise ValueError(\"Table sort requires input keys or a table index\")\n keys = [x.info.name for x in self.indices[0].columns]\n\n if isinstance(keys, str):\n keys = [keys]\n\n indexes = self.argsort(keys)\n\n if reverse:\n indexes = indexes[::-1]\n\n with self.index_mode('freeze'):\n for name, col in self.columns.items():\n # Make a new sorted column. This requires that take() also copies\n # relevant info attributes for mixin columns.\n new_col = col.take(indexes, axis=0)\n\n # First statement in try: will succeed if the column supports an in-place\n # update, and matches the legacy behavior of astropy Table. However,\n # some mixin classes may not support this, so in that case just drop\n # in the entire new column. See #9553 and #9536 for discussion.\n try:\n col[:] = new_col\n except Exception:\n # In-place update failed for some reason, exception class not\n # predictable for arbitrary mixin.\n self[col.info.name] = new_col\n\n def reverse(self):\n '''\n Reverse the row order of table rows. The table is reversed\n in place and there are no function arguments.\n\n Examples\n --------\n Create a table with three columns::\n\n >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],\n ... [12,15,18]], names=('firstname','name','tel'))\n >>> print(t)\n firstname name tel\n --------- ------- ---\n Max Miller 12\n Jo Miller 15\n John Jackson 18\n\n Reversing order::\n\n >>> t.reverse()\n >>> print(t)\n firstname name tel\n --------- ------- ---\n John Jackson 18\n Jo Miller 15\n Max Miller 12\n '''\n for col in self.columns.values():\n # First statement in try: will succeed if the column supports an in-place\n # update, and matches the legacy behavior of astropy Table. However,\n # some mixin classes may not support this, so in that case just drop\n # in the entire new column. See #9836, #9553, and #9536 for discussion.\n new_col = col[::-1]\n try:\n col[:] = new_col\n except Exception:\n # In-place update failed for some reason, exception class not\n # predictable for arbitrary mixin.\n self[col.info.name] = new_col\n\n for index in self.indices:\n index.reverse()\n\n def round(self, decimals=0):\n '''\n Round numeric columns in-place to the specified number of decimals.\n Non-numeric columns will be ignored.\n\n Examples\n --------\n Create three columns with different types:\n\n >>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],\n ... ['a', 'b', 'c']], names=('a', 'b', 'c'))\n >>> print(t)\n a b c\n --- ------ ---\n 1 -25.55 a\n 4 12.123 b\n 5 85.0 c\n\n Round them all to 0:\n\n >>> t.round(0)\n >>> print(t)\n a b c\n --- ----- ---\n 1 -26.0 a\n 4 12.0 b\n 5 85.0 c\n\n Round column 'a' to -1 decimal:\n\n >>> t.round({'a':-1})\n >>> print(t)\n a b c\n --- ----- ---\n 0 -26.0 a\n 0 12.0 b\n 0 85.0 c\n\n Parameters\n ----------\n decimals: int, dict\n Number of decimals to round the columns to. 
If a dict is given,\n the columns will be rounded to the number specified as the value.\n If a certain column is not in the dict given, it will remain the\n same.\n '''\n if isinstance(decimals, dict):\n decimal_values = decimals.values()\n column_names = decimals.keys()\n elif isinstance(decimals, int):\n decimal_values = itertools.repeat(decimals)\n column_names = self.colnames\n else:\n raise ValueError(\"'decimals' argument must be an int or a dict\")\n\n for colname, decimal in zip(column_names, decimal_values):\n col = self.columns[colname]\n if np.issubdtype(col.info.dtype, np.number):\n try:\n np.around(col, decimals=decimal, out=col)\n except TypeError:\n # Bug in numpy see https://github.com/numpy/numpy/issues/15438\n col[()] = np.around(col, decimals=decimal)\n\n def copy(self, copy_data=True):\n '''\n Return a copy of the table.\n\n Parameters\n ----------\n copy_data : bool\n If `True` (the default), copy the underlying data array.\n Otherwise, use the same data array. The ``meta`` is always\n deepcopied regardless of the value for ``copy_data``.\n '''\n out = self.__class__(self, copy=copy_data)\n\n # If the current table is grouped then do the same in the copy\n if hasattr(self, '_groups'):\n out._groups = groups.TableGroups(out, indices=self._groups._indices,\n keys=self._groups._keys)\n return out\n\n def __deepcopy__(self, memo=None):\n return self.copy(True)\n\n def __copy__(self):\n return self.copy(False)\n\n def __lt__(self, other):\n return super().__lt__(other)\n\n def __gt__(self, other):\n return super().__gt__(other)\n\n def __le__(self, other):\n return super().__le__(other)\n\n def __ge__(self, other):\n return super().__ge__(other)\n\n def __eq__(self, other):\n return self._rows_equal(other)\n\n def __ne__(self, other):\n return ~self.__eq__(other)\n\n def _rows_equal(self, other):\n \"\"\"\n Row-wise comparison of table with any other object.\n\n This is actual implementation for __eq__.\n\n Returns a 1-D boolean numpy array showing result of row-wise comparison.\n This is the same as the ``==`` comparison for tables.\n\n Parameters\n ----------\n other : Table or DataFrame or ndarray\n An object to compare with table\n\n Examples\n --------\n Comparing one Table with other::\n\n >>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))\n >>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))\n >>> t1._rows_equal(t2)\n array([ True, True])\n\n \"\"\"\n\n if isinstance(other, Table):\n other = other.as_array()\n\n if self.has_masked_columns:\n if isinstance(other, np.ma.MaskedArray):\n result = self.as_array() == other\n else:\n # If mask is True, then by definition the row doesn't match\n # because the other array is not masked.\n false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])\n result = (self.as_array().data == other) & (self.mask == false_mask)\n else:\n if isinstance(other, np.ma.MaskedArray):\n # If mask is True, then by definition the row doesn't match\n # because the other array is not masked.\n false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])\n result = (self.as_array() == other.data) & (other.mask == false_mask)\n else:\n result = self.as_array() == other\n\n return result\n\n def values_equal(self, other):\n \"\"\"\n Element-wise comparison of table with another table, list, or scalar.\n\n Returns a ``Table`` with the same columns containing boolean values\n showing result of comparison.\n\n Parameters\n ----------\n other : Table-like object or list or scalar\n Object to compare with table\n\n 
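Returns\n -------\n out : `~astropy.table.Table`\n New table with the same column names, where each column is a boolean\n array giving the element-wise comparison result.\n\n 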
Examples\n --------\n Compare one Table with other::\n\n >>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))\n >>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))\n >>> t1.values_equal(t2)\n <Table length=2>\n a b c\n bool bool bool\n ---- ----- -----\n True False False\n True True True\n\n \"\"\"\n if isinstance(other, Table):\n names = other.colnames\n else:\n try:\n other = Table(other, copy=False)\n names = other.colnames\n except Exception:\n # Broadcast other into a dict, so e.g. other = 2 will turn into\n # other = {'a': 2, 'b': 2} and then equality does a\n # column-by-column broadcasting.\n names = self.colnames\n other = {name: other for name in names}\n\n # Require column names match but do not require same column order\n if set(self.colnames) != set(names):\n raise ValueError('cannot compare tables with different column names')\n\n eqs = []\n for name in names:\n try:\n np.broadcast(self[name], other[name]) # Check if broadcast-able\n # Catch the numpy FutureWarning related to equality checking,\n # \"elementwise comparison failed; returning scalar instead, but\n # in the future will perform elementwise comparison\". Turn this\n # into an exception since the scalar answer is not what we want.\n with warnings.catch_warnings(record=True) as warns:\n warnings.simplefilter('always')\n eq = self[name] == other[name]\n if (warns and issubclass(warns[-1].category, FutureWarning)\n and 'elementwise comparison failed' in str(warns[-1].message)):\n raise FutureWarning(warns[-1].message)\n except Exception as err:\n raise ValueError(f'unable to compare column {name}') from err\n\n # Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just\n # broken and completely ignores that it should return an array.\n if not (isinstance(eq, np.ndarray)\n and eq.dtype is np.dtype('bool')\n and len(eq) == len(self)):\n raise TypeError(f'comparison for column {name} returned {eq} '\n f'instead of the expected boolean ndarray')\n\n eqs.append(eq)\n\n out = Table(eqs, names=names)\n\n return out\n\n @property\n def groups(self):\n if not hasattr(self, '_groups'):\n self._groups = groups.TableGroups(self)\n return self._groups\n\n def group_by(self, keys):\n \"\"\"\n Group this table by the specified ``keys``\n\n This effectively splits the table into groups which correspond to unique\n values of the ``keys`` grouping object. The output is a new\n `~astropy.table.TableGroups` which contains a copy of this table but\n sorted by row according to ``keys``.\n\n The ``keys`` input to `group_by` can be specified in different ways:\n\n - String or list of strings corresponding to table column name(s)\n - Numpy array (homogeneous or structured) with same length as this table\n - `~astropy.table.Table` with same length as this table\n\n Parameters\n ----------\n keys : str, list of str, numpy array, or `~astropy.table.Table`\n Key grouping object\n\n Returns\n -------\n out : `~astropy.table.Table`\n New table with groups set\n \"\"\"\n return groups.table_group_by(self, keys)\n\n def to_pandas(self, index=None, use_nullable_int=True):\n \"\"\"\n Return a :class:`pandas.DataFrame` instance\n\n The index of the created DataFrame is controlled by the ``index``\n argument. For ``index=True`` or the default ``None``, an index will be\n specified for the DataFrame if there is a primary key index on the\n Table *and* if it corresponds to a single column. If ``index=False``\n then no DataFrame index will be specified. 
If ``index`` is the name of\n a column in the table then that will be the DataFrame index.\n\n In addition to vanilla columns or masked columns, this supports Table\n mixin columns like Quantity, Time, or SkyCoord. In many cases these\n objects have no analog in pandas and will be converted to a \"encoded\"\n representation using only Column or MaskedColumn. The exception is\n Time or TimeDelta columns, which will be converted to the corresponding\n representation in pandas using ``np.datetime64`` or ``np.timedelta64``.\n See the example below.\n\n Parameters\n ----------\n index : None, bool, str\n Specify DataFrame index mode\n use_nullable_int : bool, default=True\n Convert integer MaskedColumn to pandas nullable integer type.\n If ``use_nullable_int=False`` or the pandas version does not support\n nullable integer types (version < 0.24), then the column is converted\n to float with NaN for missing elements and a warning is issued.\n\n Returns\n -------\n dataframe : :class:`pandas.DataFrame`\n A pandas :class:`pandas.DataFrame` instance\n\n Raises\n ------\n ImportError\n If pandas is not installed\n ValueError\n If the Table has multi-dimensional columns\n\n Examples\n --------\n Here we convert a table with a few mixins to a\n :class:`pandas.DataFrame` instance.\n\n >>> import pandas as pd\n >>> from astropy.table import QTable\n >>> import astropy.units as u\n >>> from astropy.time import Time, TimeDelta\n >>> from astropy.coordinates import SkyCoord\n\n >>> q = [1, 2] * u.m\n >>> tm = Time([1998, 2002], format='jyear')\n >>> sc = SkyCoord([5, 6], [7, 8], unit='deg')\n >>> dt = TimeDelta([3, 200] * u.s)\n\n >>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])\n\n >>> df = t.to_pandas(index='tm')\n >>> with pd.option_context('display.max_columns', 20):\n ... print(df)\n q sc.ra sc.dec dt\n tm\n 1998-01-01 1.0 5.0 7.0 00:00:03\n 2002-01-01 2.0 6.0 8.0 00:03:20\n\n \"\"\"\n from pandas import DataFrame, Series\n\n if index is not False:\n if index in (None, True):\n # Default is to use the table primary key if available and a single column\n if self.primary_key and len(self.primary_key) == 1:\n index = self.primary_key[0]\n else:\n index = False\n else:\n if index not in self.colnames:\n raise ValueError('index must be None, False, True or a table '\n 'column name')\n\n def _encode_mixins(tbl):\n \"\"\"Encode a Table ``tbl`` that may have mixin columns to a Table with only\n astropy Columns + appropriate meta-data to allow subsequent decoding.\n \"\"\"\n from . 
import serialize\n from astropy.time import Time, TimeDelta\n\n # Convert any Time or TimeDelta columns and pay attention to masking\n time_cols = [col for col in tbl.itercols() if isinstance(col, Time)]\n if time_cols:\n\n # Make a light copy of table and clear any indices\n new_cols = []\n for col in tbl.itercols():\n new_col = col_copy(col, copy_indices=False) if col.info.indices else col\n new_cols.append(new_col)\n tbl = tbl.__class__(new_cols, copy=False)\n\n for col in time_cols:\n if isinstance(col, TimeDelta):\n # Convert to nanoseconds (matches astropy datetime64 support)\n new_col = (col.sec * 1e9).astype('timedelta64[ns]')\n nat = np.timedelta64('NaT')\n else:\n new_col = col.datetime64.copy()\n nat = np.datetime64('NaT')\n if col.masked:\n new_col[col.mask] = nat\n tbl[col.info.name] = new_col\n\n # Convert the table to one with no mixins, only Column objects.\n encode_tbl = serialize.represent_mixins_as_columns(tbl)\n return encode_tbl\n\n tbl = _encode_mixins(self)\n\n badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]\n if badcols:\n raise ValueError(\n f'Cannot convert a table with multidimensional columns to a '\n f'pandas DataFrame. Offending columns are: {badcols}\\n'\n f'One can filter out such columns using:\\n'\n f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\\n'\n f'tbl[names].to_pandas(...)')\n\n out = OrderedDict()\n\n for name, column in tbl.columns.items():\n if isinstance(column, MaskedColumn) and np.any(column.mask):\n if column.dtype.kind in ['i', 'u']:\n pd_dtype = str(column.dtype)\n if use_nullable_int:\n # Convert int64 to Int64, uint32 to UInt32, etc for nullable types\n pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')\n out[name] = Series(column, dtype=pd_dtype)\n\n # If pandas is older than 0.24 the type may have turned to float\n if column.dtype.kind != out[name].dtype.kind:\n warnings.warn(\n f\"converted column '{name}' from {column.dtype} to {out[name].dtype}\",\n TableReplaceWarning, stacklevel=3)\n elif column.dtype.kind in ['f', 'c']:\n out[name] = column\n else:\n out[name] = column.astype(object).filled(np.nan)\n else:\n out[name] = column\n\n if (hasattr(out[name].dtype, 'byteorder')\n and out[name].dtype.byteorder not in ('=', '|')):\n out[name] = out[name].byteswap().newbyteorder()\n\n kwargs = {'index': out.pop(index)} if index else {}\n\n return DataFrame(out, **kwargs)\n\n @classmethod\n def from_pandas(cls, dataframe, index=False, units=None):\n \"\"\"\n Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance\n\n In addition to converting generic numeric or string columns, this supports\n conversion of pandas Date and Time delta columns to `~astropy.time.Time`\n and `~astropy.time.TimeDelta` columns, respectively.\n\n Parameters\n ----------\n dataframe : :class:`pandas.DataFrame`\n A pandas :class:`pandas.DataFrame` instance\n index : bool\n Include the index column in the returned table (default=False)\n units: dict\n A dict mapping column names to to a `~astropy.units.Unit`.\n The columns will have the specified unit in the Table.\n\n Returns\n -------\n table : `~astropy.table.Table`\n A `~astropy.table.Table` (or subclass) instance\n\n Raises\n ------\n ImportError\n If pandas is not installed\n\n Examples\n --------\n Here we convert a :class:`pandas.DataFrame` instance\n to a `~astropy.table.QTable`.\n\n >>> import numpy as np\n >>> import pandas as pd\n >>> from astropy.table import QTable\n\n >>> time = pd.Series(['1998-01-01', '2002-01-01'], 
dtype='datetime64[ns]')\n >>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))\n >>> df = pd.DataFrame({'time': time})\n >>> df['dt'] = dt\n >>> df['x'] = [3., 4.]\n >>> with pd.option_context('display.max_columns', 20):\n ... print(df)\n time dt x\n 0 1998-01-01 00:00:01 3.0\n 1 2002-01-01 00:05:00 4.0\n\n >>> QTable.from_pandas(df)\n <QTable length=2>\n time dt x\n object object float64\n ----------------------- ------ -------\n 1998-01-01T00:00:00.000 1.0 3.0\n 2002-01-01T00:00:00.000 300.0 4.0\n\n \"\"\"\n\n out = OrderedDict()\n\n names = list(dataframe.columns)\n columns = [dataframe[name] for name in names]\n datas = [np.array(column) for column in columns]\n masks = [np.array(column.isnull()) for column in columns]\n\n if index:\n index_name = dataframe.index.name or 'index'\n while index_name in names:\n index_name = '_' + index_name + '_'\n names.insert(0, index_name)\n columns.insert(0, dataframe.index)\n datas.insert(0, np.array(dataframe.index))\n masks.insert(0, np.zeros(len(dataframe), dtype=bool))\n\n if units is None:\n units = [None] * len(names)\n else:\n if not isinstance(units, Mapping):\n raise TypeError('Expected a Mapping \"column-name\" -> \"unit\"')\n\n not_found = set(units.keys()) - set(names)\n if not_found:\n warnings.warn('`units` contains additionial columns: {}'.format(\n not_found\n ))\n\n units = [units.get(name) for name in names]\n\n for name, column, data, mask, unit in zip(names, columns, datas, masks, units):\n\n if column.dtype.kind in ['u', 'i'] and np.any(mask):\n # Special-case support for pandas nullable int\n np_dtype = str(column.dtype).lower()\n data = np.zeros(shape=column.shape, dtype=np_dtype)\n data[~mask] = column[~mask]\n out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit, copy=False)\n continue\n\n if data.dtype.kind == 'O':\n # If all elements of an object array are string-like or np.nan\n # then coerce back to a native numpy str/unicode array.\n string_types = (str, bytes)\n nan = np.nan\n if all(isinstance(x, string_types) or x is nan for x in data):\n # Force any missing (null) values to b''. 
Numpy will\n # upcast to str/unicode as needed.\n data[mask] = b''\n\n # When the numpy object array is represented as a list then\n # numpy initializes to the correct string or unicode type.\n data = np.array([x for x in data])\n\n # Numpy datetime64\n if data.dtype.kind == 'M':\n from astropy.time import Time\n out[name] = Time(data, format='datetime64')\n if np.any(mask):\n out[name][mask] = np.ma.masked\n out[name].format = 'isot'\n\n # Numpy timedelta64\n elif data.dtype.kind == 'm':\n from astropy.time import TimeDelta\n data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9\n out[name] = TimeDelta(data_sec, format='sec')\n if np.any(mask):\n out[name][mask] = np.ma.masked\n\n else:\n if np.any(mask):\n out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)\n else:\n out[name] = Column(data=data, name=name, unit=unit)\n\n return cls(out)\n\n info = TableInfo()\n\n\nclass QTable(Table):\n \"\"\"A class to represent tables of heterogeneous data.\n\n `~astropy.table.QTable` provides a class for heterogeneous tabular data\n which can be easily modified, for instance adding columns or new rows.\n\n The `~astropy.table.QTable` class is identical to `~astropy.table.Table`\n except that columns with an associated ``unit`` attribute are converted to\n `~astropy.units.Quantity` objects.\n\n See also:\n\n - http://docs.astropy.org/en/stable/table/\n - http://docs.astropy.org/en/stable/table/mixin_columns.html\n\n Parameters\n ----------\n data : numpy ndarray, dict, list, Table, or table-like object, optional\n Data to initialize table.\n masked : bool, optional\n Specify whether the table is masked.\n names : list, optional\n Specify column names.\n dtype : list, optional\n Specify column data types.\n meta : dict, optional\n Metadata associated with the table.\n copy : bool, optional\n Copy the input data. Default is True.\n rows : numpy ndarray, list of lists, optional\n Row-oriented data for table instead of ``data`` argument.\n copy_indices : bool, optional\n Copy any indices in the input data. Default is True.\n **kwargs : dict, optional\n Additional keyword args when converting table-like object.\n\n \"\"\"\n\n def _is_mixin_for_table(self, col):\n \"\"\"\n Determine if ``col`` should be added to the table directly as\n a mixin column.\n \"\"\"\n return has_info_class(col, MixinInfo)\n\n def _convert_col_for_table(self, col):\n if isinstance(col, Column) and getattr(col, 'unit', None) is not None:\n # What to do with MaskedColumn with units: leave as MaskedColumn or\n # turn into Quantity and drop mask? Assuming we have masking support\n # in Quantity someday, let's drop the mask (consistent with legacy\n # behavior) but issue a warning.\n if isinstance(col, MaskedColumn) and np.any(col.mask):\n warnings.warn(\"dropping mask in Quantity column '{}': \"\n \"masked Quantity not supported\".format(col.info.name))\n\n # We need to turn the column into a quantity, or a subclass\n # identified in the unit (such as u.mag()).\n q_cls = getattr(col.unit, '_quantity_class', Quantity)\n qcol = q_cls(col.data, col.unit, copy=False)\n qcol.info = col.info\n col = qcol\n else:\n col = super()._convert_col_for_table(col)\n\n return col\n\n\nclass NdarrayMixin(np.ndarray):\n \"\"\"\n Mixin column class to allow storage of arbitrary numpy\n ndarrays within a Table. 
This is a subclass of numpy.ndarray\n and has the same initialization options as ndarray().\n \"\"\"\n info = ParentDtypeInfo()\n\n def __new__(cls, obj, *args, **kwargs):\n self = np.array(obj, *args, **kwargs).view(cls)\n if 'info' in getattr(obj, '__dict__', ()):\n self.info = obj.info\n return self\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n\n if callable(super().__array_finalize__):\n super().__array_finalize__(obj)\n\n # Self was created from template (e.g. obj[slice] or (obj * 2))\n # or viewcast e.g. obj.view(Column). In either case we want to\n # init Column attributes for self from obj if possible.\n if 'info' in getattr(obj, '__dict__', ()):\n self.info = obj.info\n\n def __reduce__(self):\n # patch to pickle Quantity objects (ndarray subclasses), see\n # http://www.mail-archive.com/[email protected]/msg02446.html\n\n object_state = list(super().__reduce__())\n object_state[2] = (object_state[2], self.__dict__)\n return tuple(object_state)\n\n def __setstate__(self, state):\n # patch to unpickle NdarrayMixin objects (ndarray subclasses), see\n # http://www.mail-archive.com/[email protected]/msg02446.html\n\n nd_state, own_state = state\n super().__setstate__(nd_state)\n self.__dict__.update(own_state)\n\n\nclass TableAttribute(MetaAttribute):\n \"\"\"\n Descriptor to define a custom attribute for a Table subclass.\n\n The value of the ``TableAttribute`` will be stored in a dict named\n ``__attributes__`` that is stored in the table ``meta``. The attribute\n can be accessed and set in the usual way, and it can be provided when\n creating the object.\n\n Defining an attribute by this mechanism ensures that it will persist if\n the table is sliced or serialized, for example as a pickle or ECSV file.\n\n See the `~astropy.utils.metadata.MetaAttribute` documentation for additional\n details.\n\n Parameters\n ----------\n default : object\n Default value for attribute\n\n Examples\n --------\n >>> from astropy.table import Table, TableAttribute\n >>> class MyTable(Table):\n ... identifier = TableAttribute(default=1)\n >>> t = MyTable(identifier=10)\n >>> t.identifier\n 10\n >>> t.meta\n OrderedDict([('__attributes__', {'identifier': 10})])\n \"\"\"\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport pytest\nimport numpy as np\n\nfrom astropy.tests.helper import catch_warnings\nfrom astropy.table import Table, QTable, NdarrayMixin, Column\nfrom astropy.table.table_helpers import simple_table\n\nfrom astropy import units as u\n\nfrom astropy.coordinates import (SkyCoord, Latitude, Longitude, Angle, EarthLocation,\n SphericalRepresentation, CartesianRepresentation,\n SphericalCosLatDifferential)\nfrom astropy.time import Time, TimeDelta\nfrom astropy.units.quantity import QuantityInfo\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy.utils.data import get_pkg_data_filename\n\ntry:\n import h5py\nexcept ImportError:\n HAS_H5PY = False\nelse:\n HAS_H5PY = True\n\ntry:\n import yaml\nexcept ImportError:\n HAS_YAML = False\nelse:\n HAS_YAML = True\n\nALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,\n np.int16, np.int32, np.int64, np.float32, np.float64,\n np.bool_, '|S3']\n\n\ndef _default_values(dtype):\n if dtype == np.bool_:\n return [0, 1, 1]\n elif dtype == '|S3':\n return [b'abc', b'def', b'ghi']\n else:\n return [1, 2, 3]\n\n\[email protected]('not HAS_H5PY')\ndef test_write_nopath(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n with pytest.raises(ValueError) as exc:\n t1.write(test_file)\n assert exc.value.args[0] == \"table path should be set via the path= argument\"\n\n\[email protected]('not HAS_H5PY')\ndef test_write_nopath(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n\n with catch_warnings() as warns:\n t1.write(test_file)\n\n assert np.any([str(w.message).startswith(\n \"table path was not set via the path= argument\")\n for w in warns])\n t1 = Table.read(test_file, path='__astropy_table__')\n\n\[email protected]('not HAS_H5PY')\ndef test_write_nopath_nonempty(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n\n t1.write(test_file, path='bubu')\n\n with pytest.raises(ValueError) as exc:\n t1.write(test_file, append=True)\n\n assert 'table path should always be set via the path=' in exc.value.args[0]\n\n\[email protected]('not HAS_H5PY')\ndef test_read_notable_nopath(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n h5py.File(test_file, 'w').close() # create empty file\n with pytest.raises(ValueError) as exc:\n t1 = Table.read(test_file, path='/', format='hdf5')\n assert exc.value.args[0] == 'no table found in HDF5 group /'\n\n\[email protected]('not HAS_H5PY')\ndef test_read_nopath(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path=\"the_table\")\n with catch_warnings(AstropyUserWarning) as warning_lines:\n t2 = Table.read(test_file)\n assert not np.any([\"path= was not sp\" in str(wl.message)\n for wl in warning_lines])\n\n assert np.all(t1['a'] == t2['a'])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_nopath_multi_tables(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path=\"the_table\")\n t1.write(test_file, path=\"the_table_but_different\", append=True,\n overwrite=True)\n with pytest.warns(AstropyUserWarning,\n match=r\"path= was not specified but multiple tables\"):\n t2 = Table.read(test_file)\n\n assert np.all(t1['a'] == t2['a'])\n\n\[email 
protected]('not HAS_H5PY')\ndef test_write_invalid_path(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n with pytest.raises(ValueError) as exc:\n t1.write(test_file, path='test/')\n assert exc.value.args[0] == \"table path should end with table name, not /\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_invalid_path(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table')\n with pytest.raises(OSError) as exc:\n Table.read(test_file, path='test/')\n assert exc.value.args[0] == \"Path test/ does not exist\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_missing_group(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n h5py.File(test_file, 'w').close() # create empty file\n with pytest.raises(OSError) as exc:\n Table.read(test_file, path='test/path/table')\n assert exc.value.args[0] == \"Path test/path/table does not exist\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_missing_table(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n with h5py.File(test_file, 'w') as f:\n f.create_group('test').create_group('path')\n with pytest.raises(OSError) as exc:\n Table.read(test_file, path='test/path/table')\n assert exc.value.args[0] == \"Path test/path/table does not exist\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_missing_group_fileobj(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n with h5py.File(test_file, 'w') as f:\n with pytest.raises(OSError) as exc:\n Table.read(f, path='test/path/table')\n assert exc.value.args[0] == \"Path test/path/table does not exist\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_simple(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table')\n t2 = Table.read(test_file, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_existing_table(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table')\n with pytest.raises(OSError) as exc:\n t1.write(test_file, path='the_table', append=True)\n assert exc.value.args[0] == \"Table the_table already exists\"\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_memory(tmpdir):\n with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(output_file, path='the_table')\n t2 = Table.read(output_file, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_existing(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n h5py.File(test_file, 'w').close() # create empty file\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n with pytest.raises(OSError) as exc:\n t1.write(test_file, path='the_table')\n assert exc.value.args[0].startswith(\"File exists:\")\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_existing_overwrite(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n h5py.File(test_file, 'w').close() # create empty file\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table', overwrite=True)\n t2 = Table.read(test_file, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not 
HAS_H5PY')\ndef test_read_write_existing_append(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n h5py.File(test_file, 'w').close() # create empty file\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table_1', append=True)\n t1.write(test_file, path='the_table_2', append=True)\n t2 = Table.read(test_file, path='the_table_1')\n assert np.all(t2['a'] == [1, 2, 3])\n t3 = Table.read(test_file, path='the_table_2')\n assert np.all(t3['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_existing_append_groups(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n with h5py.File(test_file, 'w') as f:\n f.create_group('test_1')\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='test_1/the_table_1', append=True)\n t1.write(test_file, path='test_2/the_table_2', append=True)\n t2 = Table.read(test_file, path='test_1/the_table_1')\n assert np.all(t2['a'] == [1, 2, 3])\n t3 = Table.read(test_file, path='test_2/the_table_2')\n assert np.all(t3['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_existing_append_overwrite(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='table1')\n t1.write(test_file, path='table2', append=True)\n t1v2 = Table()\n t1v2.add_column(Column(name='a', data=[4, 5, 6]))\n with pytest.raises(OSError) as exc:\n t1v2.write(test_file, path='table1', append=True)\n assert exc.value.args[0] == 'Table table1 already exists'\n t1v2.write(test_file, path='table1', append=True, overwrite=True)\n t2 = Table.read(test_file, path='table1')\n assert np.all(t2['a'] == [4, 5, 6])\n t3 = Table.read(test_file, path='table2')\n assert np.all(t3['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_fileobj(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='the_table')\n\n import h5py\n with h5py.File(test_file, 'r') as input_file:\n t2 = Table.read(input_file, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_filobj_path(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='path/to/data/the_table')\n\n import h5py\n with h5py.File(test_file, 'r') as input_file:\n t2 = Table.read(input_file, path='path/to/data/the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_filobj_group_path(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(test_file, path='path/to/data/the_table')\n\n import h5py\n with h5py.File(test_file, 'r') as input_file:\n t2 = Table.read(input_file['path/to'], path='data/the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_read_wrong_fileobj():\n\n class FakeFile:\n def read(self):\n pass\n\n f = FakeFile()\n\n with pytest.raises(TypeError) as exc:\n t1 = Table.read(f, format='hdf5')\n assert exc.value.args[0] == 'h5py can only open regular files'\n\n\[email protected]('not HAS_H5PY')\ndef test_write_fileobj(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n import h5py\n with h5py.File(test_file, 'w') as output_file:\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n 
t1.write(output_file, path='the_table')\n\n t2 = Table.read(test_file, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_write_create_dataset_kwargs(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n the_path = 'the_table'\n\n import h5py\n with h5py.File(test_file, 'w') as output_file:\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(output_file, path=the_path,\n maxshape=(None, ))\n\n # A roundabout way of checking this, but the table created above should be\n # resizable if the kwarg was passed through successfully\n t2 = Table()\n t2.add_column(Column(name='a', data=[4, 5]))\n with h5py.File(test_file, 'a') as output_file:\n output_file[the_path].resize((len(t1) + len(t2), ))\n output_file[the_path][len(t1):] = t2.as_array()\n\n t3 = Table.read(test_file, path='the_table')\n assert np.all(t3['a'] == [1, 2, 3, 4, 5])\n\n\[email protected]('not HAS_H5PY')\ndef test_write_filobj_group(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n import h5py\n with h5py.File(test_file, 'w') as output_file:\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(output_file, path='path/to/data/the_table')\n\n t2 = Table.read(test_file, path='path/to/data/the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n\[email protected]('not HAS_H5PY')\ndef test_write_wrong_type():\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n with pytest.raises(TypeError) as exc:\n t1.write(1212, path='path/to/data/the_table', format='hdf5')\n assert exc.value.args[0] == ('output should be a string '\n 'or an h5py File or Group object')\n\n\[email protected]('not HAS_H5PY')\[email protected](('dtype'), ALL_DTYPES)\ndef test_preserve_single_dtypes(tmpdir, dtype):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n values = _default_values(dtype)\n\n t1 = Table()\n t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))\n t1.write(test_file, path='the_table')\n\n t2 = Table.read(test_file, path='the_table')\n\n assert np.all(t2['a'] == values)\n assert t2['a'].dtype == dtype\n\n\[email protected]('not HAS_H5PY')\ndef test_preserve_all_dtypes(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n\n for dtype in ALL_DTYPES:\n values = _default_values(dtype)\n t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))\n\n t1.write(test_file, path='the_table')\n\n t2 = Table.read(test_file, path='the_table')\n\n for dtype in ALL_DTYPES:\n values = _default_values(dtype)\n assert np.all(t2[str(dtype)] == values)\n assert t2[str(dtype)].dtype == dtype\n\n\[email protected]('not HAS_H5PY')\ndef test_preserve_meta(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n\n t1.meta['a'] = 1\n t1.meta['b'] = 'hello'\n t1.meta['c'] = 3.14159\n t1.meta['d'] = True\n t1.meta['e'] = np.array([1, 2, 3])\n\n t1.write(test_file, path='the_table')\n\n t2 = Table.read(test_file, path='the_table')\n\n for key in t1.meta:\n assert np.all(t1.meta[key] == t2.meta[key])\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_preserve_serialized(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1['a'] = Column(data=[1, 2, 3], unit=\"s\")\n t1['a'].meta['a0'] = \"A0\"\n t1['a'].meta['a1'] = {\"a1\": [0, 1]}\n t1['a'].format = '7.3f'\n t1['a'].description = 'A column'\n t1.meta['b'] = 1\n t1.meta['c'] = {\"c0\": [0, 1]}\n\n t1.write(test_file, path='the_table', serialize_meta=True, 
overwrite=True)\n\n t2 = Table.read(test_file, path='the_table')\n\n assert t1['a'].unit == t2['a'].unit\n assert t1['a'].format == t2['a'].format\n assert t1['a'].description == t2['a'].description\n assert t1['a'].meta == t2['a'].meta\n assert t1.meta == t2.meta\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_preserve_serialized_old_meta_format(tmpdir):\n \"\"\"Test the old meta format\n\n Only for some files created prior to v4.0, in compatibility mode.\n \"\"\"\n test_file = get_pkg_data_filename('data/old_meta_example.hdf5')\n\n t1 = Table()\n t1['a'] = Column(data=[1, 2, 3], unit=\"s\")\n t1['a'].meta['a0'] = \"A0\"\n t1['a'].meta['a1'] = {\"a1\": [0, 1]}\n t1['a'].format = '7.3f'\n t1['a'].description = 'A column'\n t1.meta['b'] = 1\n t1.meta['c'] = {\"c0\": [0, 1]}\n\n t2 = Table.read(test_file, path='the_table')\n\n assert t1['a'].unit == t2['a'].unit\n assert t1['a'].format == t2['a'].format\n assert t1['a'].description == t2['a'].description\n assert t1['a'].meta == t2['a'].meta\n assert t1.meta == t2.meta\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_preserve_serialized_in_complicated_path(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1['a'] = Column(data=[1, 2, 3], unit=\"s\")\n t1['a'].meta['a0'] = \"A0\"\n t1['a'].meta['a1'] = {\"a1\": [0, 1]}\n t1['a'].format = '7.3f'\n t1['a'].description = 'A column'\n t1.meta['b'] = 1\n t1.meta['c'] = {\"c0\": [0, 1]}\n\n t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,\n overwrite=True)\n\n t2 = Table.read(test_file, path='the_table/complicated/path')\n\n assert t1['a'].format == t2['a'].format\n assert t1['a'].unit == t2['a'].unit\n assert t1['a'].description == t2['a'].description\n assert t1['a'].meta == t2['a'].meta\n assert t1.meta == t2.meta\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_metadata_very_large(tmpdir):\n \"\"\"Test that very large datasets work, now!\"\"\"\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1['a'] = Column(data=[1, 2, 3], unit=\"s\")\n t1['a'].meta['a0'] = \"A0\"\n t1['a'].meta['a1'] = {\"a1\": [0, 1]}\n t1['a'].format = '7.3f'\n t1['a'].description = 'A column'\n t1.meta['b'] = 1\n t1.meta['c'] = {\"c0\": [0, 1]}\n t1.meta[\"meta_big\"] = \"0\" * (2 ** 16 + 1)\n t1.meta[\"meta_biggerstill\"] = \"0\" * (2 ** 18)\n\n t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)\n\n t2 = Table.read(test_file, path='the_table')\n\n assert t1['a'].unit == t2['a'].unit\n assert t1['a'].format == t2['a'].format\n assert t1['a'].description == t2['a'].description\n assert t1['a'].meta == t2['a'].meta\n assert t1.meta == t2.meta\n\n\[email protected]('not HAS_H5PY')\ndef test_skip_meta(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n\n t1.meta['a'] = 1\n t1.meta['b'] = 'hello'\n t1.meta['c'] = 3.14159\n t1.meta['d'] = True\n t1.meta['e'] = np.array([1, 2, 3])\n t1.meta['f'] = str\n\n with catch_warnings() as w:\n t1.write(test_file, path='the_table')\n assert len(w) == 1\n assert str(w[0].message).startswith(\n \"Attribute `f` of type {} cannot be written to HDF5 files - skipping\".format(type(t1.meta['f'])))\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_fail_meta_serialize(tmpdir):\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.meta['f'] = str\n\n with pytest.raises(Exception) as err:\n t1.write(test_file, 
path='the_table', serialize_meta=True)\n assert \"cannot represent an object\" in str(err.value)\n assert \"<class 'str'>\" in str(err.value)\n\n\[email protected]('not HAS_H5PY')\ndef test_read_h5py_objects(tmpdir):\n\n # Regression test - ensure that Datasets are recognized automatically\n\n test_file = str(tmpdir.join('test.hdf5'))\n\n import h5py\n with h5py.File(test_file, 'w') as output_file:\n t1 = Table()\n t1.add_column(Column(name='a', data=[1, 2, 3]))\n t1.write(output_file, path='the_table')\n\n f = h5py.File(test_file, mode='r')\n\n t2 = Table.read(f, path='the_table')\n assert np.all(t2['a'] == [1, 2, 3])\n\n t3 = Table.read(f['/'], path='the_table')\n assert np.all(t3['a'] == [1, 2, 3])\n\n t4 = Table.read(f['the_table'])\n assert np.all(t4['a'] == [1, 2, 3])\n\n f.close() # don't raise an error in 'test --open-files'\n\n\[email protected]('not HAS_H5PY')\ndef test_read_write_unicode_to_hdf5(tmpdir):\n test_file = str(tmpdir.join('test.hdf5'))\n\n t = Table()\n t['p'] = ['a', 'b', 'c']\n t['q'] = [1, 2, 3]\n t['r'] = [b'a', b'b', b'c']\n t['s'] = [\"\\u2119\", \"\\u01b4\", \"\\u2602\"]\n t.write(test_file, path='the_table', overwrite=True)\n\n t1 = Table.read(test_file, path='the_table', character_as_bytes=False)\n for col, col1 in zip(t.itercols(), t1.itercols()):\n assert np.all(col == col1)\n assert np.all(t1['p'].info.dtype.kind == \"U\")\n assert np.all(t1['q'].info.dtype.kind == \"i\")\n assert np.all(t1['r'].info.dtype.kind == \"U\")\n assert np.all(t1['s'].info.dtype.kind == \"U\")\n\n # Test default (character_as_bytes=True)\n t2 = Table.read(test_file, path='the_table')\n for col, col1 in zip(t.itercols(), t2.itercols()):\n assert np.all(col == col1)\n assert np.all(t2['p'].info.dtype.kind == \"S\")\n assert np.all(t2['q'].info.dtype.kind == \"i\")\n assert np.all(t2['r'].info.dtype.kind == \"S\")\n assert np.all(t2['s'].info.dtype.kind == \"S\")\n\n\ndef assert_objects_equal(obj1, obj2, attrs, compare_class=True):\n if compare_class:\n assert obj1.__class__ is obj2.__class__\n\n info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']\n for attr in attrs + info_attrs:\n a1 = obj1\n a2 = obj2\n for subattr in attr.split('.'):\n try:\n a1 = getattr(a1, subattr)\n a2 = getattr(a2, subattr)\n except AttributeError:\n a1 = a1[subattr]\n a2 = a2[subattr]\n\n # Mixin info.meta can None instead of empty OrderedDict(), #6720 would\n # fix this.\n if attr == 'info.meta':\n if a1 is None:\n a1 = {}\n if a2 is None:\n a2 = {}\n\n assert np.all(a1 == a2)\n\n# Testing HDF5 table read/write with mixins. 
This is mostly\n# copied from FITS mixin testing, and it might be good to unify it.\n\n\nel = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)\nel2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)\nsr = SphericalRepresentation(\n [0, 1]*u.deg, [2, 3]*u.deg, 1*u.kpc)\ncr = CartesianRepresentation(\n [0, 1]*u.pc, [4, 5]*u.pc, [8, 6]*u.pc)\nsd = SphericalCosLatDifferential(\n [0, 1]*u.mas/u.yr, [0, 1]*u.mas/u.yr, 10*u.km/u.s)\nsrd = SphericalRepresentation(sr, differentials=sd)\nsc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',\n obstime='J1990.5')\nscc = sc.copy()\nscc.representation_type = 'cartesian'\ntm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)\n\n# NOTE: in the test below the name of the column \"x\" for the Quantity is\n# important since it tests the fix for #10215 (namespace clash, where \"x\"\n# clashes with \"el2.x\").\nmixin_cols = {\n 'tm': tm,\n 'dt': TimeDelta([1, 2] * u.day),\n 'sc': sc,\n 'scc': scc,\n 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',\n obstime=['J1990.5', 'J1991.5']),\n 'x': [1, 2] * u.m,\n 'qdb': [10, 20] * u.dB(u.mW),\n 'qdex': [4.5, 5.5] * u.dex(u.cm/u.s**2),\n 'qmag': [21, 22] * u.ABmag,\n 'lat': Latitude([1, 2] * u.deg),\n 'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),\n 'ang': Angle([1, 2] * u.deg),\n 'el2': el2,\n 'sr': sr,\n 'cr': cr,\n 'sd': sd,\n 'srd': srd,\n}\n\ntime_attrs = ['value', 'shape', 'format', 'scale', 'location']\ncompare_attrs = {\n 'c1': ['data'],\n 'c2': ['data'],\n 'tm': time_attrs,\n 'dt': ['shape', 'value', 'format', 'scale'],\n 'sc': ['ra', 'dec', 'representation_type', 'frame.name'],\n 'scc': ['x', 'y', 'z', 'representation_type', 'frame.name'],\n 'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],\n 'x': ['value', 'unit'],\n 'qdb': ['value', 'unit'],\n 'qdex': ['value', 'unit'],\n 'qmag': ['value', 'unit'],\n 'lon': ['value', 'unit', 'wrap_angle'],\n 'lat': ['value', 'unit'],\n 'ang': ['value', 'unit'],\n 'el2': ['x', 'y', 'z', 'ellipsoid'],\n 'nd': ['x', 'y', 'z'],\n 'sr': ['lon', 'lat', 'distance'],\n 'cr': ['x', 'y', 'z'],\n 'sd': ['d_lon_coslat', 'd_lat', 'd_distance'],\n 'srd': ['lon', 'lat', 'distance', 'differentials.s.d_lon_coslat',\n 'differentials.s.d_lat', 'differentials.s.d_distance'],\n}\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\ndef test_hdf5_mixins_qtable_to_table(tmpdir):\n \"\"\"Test writing as QTable and reading as Table. 
Ensure correct classes\n come out.\n \"\"\"\n filename = str(tmpdir.join('test_simple.hdf5'))\n\n names = sorted(mixin_cols)\n\n t = QTable([mixin_cols[name] for name in names], names=names)\n t.write(filename, format='hdf5', path='root', serialize_meta=True)\n t2 = Table.read(filename, format='hdf5', path='root')\n\n assert t.colnames == t2.colnames\n\n for name, col in t.columns.items():\n col2 = t2[name]\n\n # Special-case Time, which does not yet support round-tripping\n # the format.\n if isinstance(col2, Time):\n col2.format = col.format\n\n attrs = compare_attrs[name]\n compare_class = True\n\n if isinstance(col.info, QuantityInfo):\n # Downgrade Quantity to Column + unit\n assert type(col2) is Column\n # Class-specific attributes like `value` or `wrap_angle` are lost.\n attrs = ['unit']\n compare_class = False\n # Compare data values here (assert_objects_equal doesn't know how in this case)\n assert np.all(col.value == col2)\n\n assert_objects_equal(col, col2, attrs, compare_class)\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\[email protected]('table_cls', (Table, QTable))\ndef test_hdf5_mixins_as_one(table_cls, tmpdir):\n \"\"\"Test write/read all cols at once and validate intermediate column names\"\"\"\n filename = str(tmpdir.join('test_simple.hdf5'))\n names = sorted(mixin_cols)\n\n serialized_names = ['ang',\n 'cr.x', 'cr.y', 'cr.z',\n 'dt.jd1', 'dt.jd2',\n 'el2.x', 'el2.y', 'el2.z',\n 'lat',\n 'lon',\n 'qdb',\n 'qdex',\n 'qmag',\n 'sc.ra', 'sc.dec',\n 'scc.x', 'scc.y', 'scc.z',\n 'scd.ra', 'scd.dec', 'scd.distance',\n 'scd.obstime.jd1', 'scd.obstime.jd2',\n 'sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance',\n 'sr.lon', 'sr.lat', 'sr.distance',\n 'srd.lon', 'srd.lat', 'srd.distance',\n 'srd.differentials.s.d_lon_coslat',\n 'srd.differentials.s.d_lat',\n 'srd.differentials.s.d_distance',\n 'tm.jd1', 'tm.jd2',\n 'x',\n ]\n\n t = table_cls([mixin_cols[name] for name in names], names=names)\n t.meta['C'] = 'spam'\n t.meta['comments'] = ['this', 'is', 'a', 'comment']\n t.meta['history'] = ['first', 'second', 'third']\n\n t.write(filename, format=\"hdf5\", path='root', serialize_meta=True)\n\n t2 = table_cls.read(filename, format='hdf5', path='root')\n assert t2.meta['C'] == 'spam'\n assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']\n assert t2.meta['history'] == ['first', 'second', 'third']\n\n assert t.colnames == t2.colnames\n\n # Read directly via hdf5 and confirm column names\n h5 = h5py.File(filename, 'r')\n assert list(h5['root'].dtype.names) == serialized_names\n h5.close()\n\n\[email protected]('not HAS_H5PY or not HAS_YAML')\[email protected]('name_col', list(mixin_cols.items()))\[email protected]('table_cls', (Table, QTable))\ndef test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):\n \"\"\"Test write/read one col at a time and do detailed validation\"\"\"\n filename = str(tmpdir.join('test_simple.hdf5'))\n name, col = name_col\n\n c = [1.0, 2.0]\n t = table_cls([c, col, c], names=['c1', name, 'c2'])\n t[name].info.description = 'my description'\n t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}\n\n if not t.has_mixin_columns:\n pytest.skip('column is not a mixin (e.g. 
Quantity subclass in Table)')\n\n if isinstance(t[name], NdarrayMixin):\n pytest.xfail('NdarrayMixin not supported')\n\n t.write(filename, format=\"hdf5\", path='root', serialize_meta=True)\n t2 = table_cls.read(filename, format='hdf5', path='root')\n\n assert t.colnames == t2.colnames\n\n for colname in t.colnames:\n assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])\n\n # Special case to make sure Column type doesn't leak into Time class data\n if name.startswith('tm'):\n assert t2[name]._time.jd1.__class__ is np.ndarray\n assert t2[name]._time.jd2.__class__ is np.ndarray\n\n\[email protected]('HAS_YAML or not HAS_H5PY')\ndef test_warn_for_dropped_info_attributes(tmpdir):\n filename = str(tmpdir.join('test.hdf5'))\n t = Table([[1, 2]])\n t['col0'].info.description = 'hello'\n with catch_warnings() as warns:\n t.write(filename, path='root', serialize_meta=False)\n assert len(warns) == 1\n assert str(warns[0].message).startswith(\n \"table contains column(s) with defined 'unit'\")\n\n\[email protected]('HAS_YAML or not HAS_H5PY')\ndef test_error_for_mixins_but_no_yaml(tmpdir):\n filename = str(tmpdir.join('test.hdf5'))\n t = Table([mixin_cols['sc']])\n with pytest.raises(TypeError) as err:\n t.write(filename, path='root', serialize_meta=True)\n assert \"cannot write type SkyCoord column 'col0' to HDF5 without PyYAML\" in str(err.value)\n\n\[email protected]('not HAS_YAML or not HAS_H5PY')\ndef test_round_trip_masked_table_default(tmpdir):\n \"\"\"Test round-trip of MaskedColumn through HDF5 using default serialization\n that writes a separate mask column. Note:\n\n >>> simple_table(masked=True)\n <Table masked=True length=3>\n a b c\n int64 float64 str1\n ----- ------- ----\n -- 1.0 c\n 2 2.0 --\n 3 -- e\n \"\"\"\n filename = str(tmpdir.join('test.h5'))\n\n t = simple_table(masked=True) # int, float, and str cols with one masked element\n t['c'] = [b'c', b'd', b'e']\n t['c'].mask[1] = True\n t.write(filename, format='hdf5', path='root', serialize_meta=True)\n\n t2 = Table.read(filename)\n assert t2.masked is False\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert np.all(t2[name].mask == t[name].mask)\n assert np.all(t2[name] == t[name])\n\n # Data under the mask round-trips also (unmask data to show this).\n t[name].mask = False\n t2[name].mask = False\n assert np.all(t2[name] == t[name])\n\n\[email protected]('not HAS_YAML or not HAS_H5PY')\ndef test_overwrite_serialized_meta():\n # This used to cause an error because the meta data table\n # was not removed from the existing file.\n\n with h5py.File('test_data.h5', 'w', driver='core', backing_store=False) as out:\n t1 = Table()\n t1.add_column(Column(data=[4, 8, 15], unit='cm'))\n t1.write(out, path='data', serialize_meta=True)\n\n t2 = Table.read(out, path='data')\n assert all(t1 == t2)\n assert t1.info(out=None) == t2.info(out=None)\n\n t3 = Table()\n t3.add_column(Column(data=[16, 23, 42], unit='g'))\n t3.write(out, path='data', serialize_meta=True, append=True, overwrite=True)\n\n t2 = Table.read(out, path='data')\n assert all(t3 == t2)\n assert t3.info(out=None) == t2.info(out=None)\n"
] |
[
[
"numpy.testing.assert_array_equal"
],
[
"numpy.linspace",
"numpy.arange",
"numpy.random.normal",
"numpy.random.sample",
"numpy.exp"
],
[
"pandas.Series",
"numpy.asarray",
"numpy.around",
"numpy.issubdtype",
"pandas.DataFrame",
"numpy.dtype",
"numpy.timedelta64",
"numpy.broadcast",
"numpy.datetime64",
"numpy.any",
"numpy.broadcast_to",
"numpy.isscalar",
"numpy.ma.array",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.all",
"numpy.array"
]
] |
tomato18/openpilot
|
[
"2b4b53f0405c92a789427e085c797a8719658148"
] |
[
"selfdrive/controls/lib/planner.py"
] |
[
"#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom common.params import Params\nfrom common.numpy_fast import interp\n\nimport cereal.messaging as messaging\nfrom cereal import car\nfrom common.realtime import sec_since_boot\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.controls.lib.speed_smoother import speed_smoother\nfrom selfdrive.controls.lib.longcontrol import LongCtrlState, MIN_CAN_SPEED\nfrom selfdrive.controls.lib.fcw import FCWChecker\nfrom selfdrive.controls.lib.long_mpc import LongitudinalMpc\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX\n\nMAX_SPEED = 255.0 #kpH\n\nLON_MPC_STEP = 0.2 # first step is 0.2s\nMAX_SPEED_ERROR = 2.0\nAWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted\n\n# lookup tables VS speed to determine min and max accels in cruise\n# make sure these accelerations are smaller than mpc limits\n_A_CRUISE_MIN_V = [-1.5, -1.5, -1.5, -1., -.5]\n_A_CRUISE_MIN_V_FOLLOWING = [-3.5, -3.5, -3., -1.5, -1.]\n_A_CRUISE_MIN_BP = [ 0., 5., 10., 20., 40.]\n\n# need fast accel at very low speed for stop and go\n# make sure these accelerations are smaller than mpc limits\n_A_CRUISE_MAX_V = [.5, 1.5, 0.65, .4]\n_A_CRUISE_MAX_V_FOLLOWING = [1., 2., 0.65, .4]\n_A_CRUISE_MAX_BP = [0., 3., 22.5, 40.]\n\n# Lookup table for turns\n_A_TOTAL_MAX_V = [1.7, 3.2]\n_A_TOTAL_MAX_BP = [20., 40.]\n\n# 75th percentile\nSPEED_PERCENTILE_IDX = 7\n\n\ndef calc_cruise_accel_limits(v_ego, following):\n if following:\n a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V_FOLLOWING)\n a_cruise_min = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V_FOLLOWING)\n else:\n a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V)\n a_cruise_min = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)\n return np.vstack([a_cruise_min, a_cruise_max])\n\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, CP):\n \"\"\"\n This function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n this should avoid accelerating when losing the target in turns\n \"\"\"\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))\n\n return [a_target[0], min(a_target[1], a_x_allowed)]\n\n\nclass Planner():\n def __init__(self, CP):\n self.CP = CP\n\n self.mpc1 = LongitudinalMpc(1)\n self.mpc2 = LongitudinalMpc(2)\n\n self.v_acc_start = 0.0\n self.a_acc_start = 0.0\n\n self.v_acc = 0.0\n self.v_acc_future = 0.0\n self.a_acc = 0.0\n self.v_cruise = 0.0\n self.a_cruise = 0.0\n\n self.longitudinalPlanSource = 'cruise'\n self.fcw_checker = FCWChecker()\n self.path_x = np.arange(192)\n\n self.params = Params()\n self.first_loop = True\n\n def choose_solution(self, v_cruise_setpoint, enabled):\n if enabled:\n solutions = {'cruise': self.v_cruise}\n if self.mpc1.prev_lead_status:\n solutions['mpc1'] = self.mpc1.v_mpc\n if self.mpc2.prev_lead_status:\n solutions['mpc2'] = self.mpc2.v_mpc\n\n slowest = min(solutions, key=solutions.get)\n\n self.longitudinalPlanSource = slowest\n # Choose lowest of MPC and cruise\n if slowest == 'mpc1':\n self.v_acc = self.mpc1.v_mpc\n self.a_acc = self.mpc1.a_mpc\n elif slowest == 'mpc2':\n self.v_acc = self.mpc2.v_mpc\n self.a_acc = self.mpc2.a_mpc\n elif slowest == 'cruise':\n self.v_acc = self.v_cruise\n self.a_acc = self.a_cruise\n\n self.v_acc_future = min([self.mpc1.v_mpc_future, 
self.mpc2.v_mpc_future, v_cruise_setpoint])\n\n def update(self, sm, pm, CP, VM, PP):\n \"\"\"Gets called when new radarState is available\"\"\"\n cur_time = sec_since_boot()\n v_ego = sm['carState'].vEgo\n\n long_control_state = sm['controlsState'].longControlState\n v_cruise_kph = sm['controlsState'].vCruise\n force_slow_decel = sm['controlsState'].forceDecel\n\n v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)\n v_cruise_setpoint = v_cruise_kph * CV.KPH_TO_MS\n\n lead_1 = sm['radarState'].leadOne\n lead_2 = sm['radarState'].leadTwo\n\n enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0\n\n # Calculate speed for normal cruise control\n if enabled and not self.first_loop and not sm['carState'].gasPressed:\n accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]\n jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning\n accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngle, accel_limits, self.CP)\n\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)\n accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])\n\n self.v_cruise, self.a_cruise = speed_smoother(self.v_acc_start, self.a_acc_start,\n v_cruise_setpoint,\n accel_limits_turns[1], accel_limits_turns[0],\n jerk_limits[1], jerk_limits[0],\n LON_MPC_STEP)\n\n # cruise speed can't be negative even is user is distracted\n self.v_cruise = max(self.v_cruise, 0.)\n else:\n starting = long_control_state == LongCtrlState.starting\n a_ego = min(sm['carState'].aEgo, 0.0)\n reset_speed = MIN_CAN_SPEED if starting else v_ego\n reset_accel = self.CP.startAccel if starting else a_ego\n self.v_acc = reset_speed\n self.a_acc = reset_accel\n self.v_acc_start = reset_speed\n self.a_acc_start = reset_accel\n self.v_cruise = reset_speed\n self.a_cruise = reset_accel\n\n self.mpc1.set_cur_state(self.v_acc_start, self.a_acc_start)\n self.mpc2.set_cur_state(self.v_acc_start, self.a_acc_start)\n\n self.mpc1.update(pm, sm['carState'], lead_1, v_cruise_setpoint)\n self.mpc2.update(pm, sm['carState'], lead_2, v_cruise_setpoint)\n\n self.choose_solution(v_cruise_setpoint, enabled)\n\n # determine fcw\n if self.mpc1.new_lead:\n self.fcw_checker.reset_lead(cur_time)\n\n blinkers = sm['carState'].leftBlinker or sm['carState'].rightBlinker\n fcw = self.fcw_checker.update(self.mpc1.mpc_solution, cur_time,\n sm['controlsState'].active,\n v_ego, sm['carState'].aEgo,\n lead_1.dRel, lead_1.vLead, lead_1.aLeadK,\n lead_1.yRel, lead_1.vLat,\n lead_1.fcw, blinkers) and not sm['carState'].brakePressed\n if fcw:\n cloudlog.info(\"FCW triggered %s\", self.fcw_checker.counters)\n\n radar_dead = not sm.alive['radarState']\n\n radar_errors = list(sm['radarState'].radarErrors)\n radar_fault = car.RadarData.Error.fault in radar_errors\n radar_can_error = car.RadarData.Error.canError in radar_errors\n\n # **** send the plan ****\n plan_send = messaging.new_message('plan')\n\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'radarState'])\n\n plan_send.plan.mdMonoTime = sm.logMonoTime['model']\n plan_send.plan.radarStateMonoTime = sm.logMonoTime['radarState']\n\n # longitudal plan\n plan_send.plan.vCruise = float(self.v_cruise)\n plan_send.plan.aCruise = float(self.a_cruise)\n 
plan_send.plan.vStart = float(self.v_acc_start)\n plan_send.plan.aStart = float(self.a_acc_start)\n plan_send.plan.vTarget = float(self.v_acc)\n plan_send.plan.aTarget = float(self.a_acc)\n plan_send.plan.vTargetFuture = float(self.v_acc_future)\n plan_send.plan.hasLead = self.mpc1.prev_lead_status\n plan_send.plan.longitudinalPlanSource = self.longitudinalPlanSource\n\n radar_valid = not (radar_dead or radar_fault)\n plan_send.plan.radarValid = bool(radar_valid)\n plan_send.plan.radarCanError = bool(radar_can_error)\n\n plan_send.plan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.rcv_time['radarState']\n\n # Send out fcw\n plan_send.plan.fcw = fcw\n\n pm.send('plan', plan_send)\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n a_acc_sol = self.a_acc_start + (CP.radarTimeStep / LON_MPC_STEP) * (self.a_acc - self.a_acc_start)\n v_acc_sol = self.v_acc_start + CP.radarTimeStep * (a_acc_sol + self.a_acc_start) / 2.0\n self.v_acc_start = v_acc_sol\n self.a_acc_start = a_acc_sol\n\n self.first_loop = False\n"
] |
[
[
"numpy.arange",
"numpy.vstack"
]
] |
ksangeek/cuml
|
[
"1f508d989c1d7974c475f115f6ea063aea8aab39",
"1f508d989c1d7974c475f115f6ea063aea8aab39"
] |
[
"python/cuml/test/test_random_projection.py",
"python/cuml/test/test_lasso.py"
] |
[
"# Copyright (c) 2018-2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom cuml.random_projection import GaussianRandomProjection, \\\n SparseRandomProjection\nfrom cuml.random_projection import johnson_lindenstrauss_min_dim \\\n as cuml_johnson_lindenstrauss_min_dim\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim \\\n as sklearn_johnson_lindenstrauss_min_dim\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom scipy.spatial.distance import pdist\n\nimport cudf\nimport numpy as np\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('input_type', ['dataframe', 'ndarray'])\[email protected]('method', ['gaussian', 'sparse'])\ndef test_random_projection_fit(datatype, input_type, method):\n # dataset generation\n data, target = make_blobs(n_samples=800, centers=400, n_features=3000)\n\n # conversion to input_type\n data = data.astype(datatype)\n target = target.astype(datatype)\n\n # creation of model\n if method == 'gaussian':\n model = GaussianRandomProjection(eps=0.2)\n else:\n model = SparseRandomProjection(eps=0.2)\n\n # fitting the model\n if input_type == 'dataframe':\n gdf = cudf.DataFrame()\n for i in range(data.shape[1]):\n gdf[str(i)] = np.asarray(data[:, i], dtype=datatype)\n model.fit(gdf)\n else:\n model.fit(data)\n\n assert True # Did not crash\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('input_type', ['dataframe', 'ndarray'])\[email protected]('method', ['gaussian', 'sparse'])\ndef test_random_projection_fit_transform(datatype, input_type, method):\n eps = 0.2\n\n # dataset generation\n data, target = make_blobs(n_samples=800, centers=400, n_features=3000)\n\n # conversion to input_type\n data = data.astype(datatype)\n target = target.astype(datatype)\n\n # creation of model\n if method == 'gaussian':\n model = GaussianRandomProjection(eps=eps)\n else:\n model = SparseRandomProjection(eps=eps)\n\n # fitting the model\n if input_type == 'dataframe':\n gdf = cudf.DataFrame()\n for i in range(data.shape[1]):\n gdf[str(i)] = np.asarray(data[:, i], dtype=datatype)\n model.fit(gdf)\n else:\n model.fit(data)\n\n # applying transformation\n if input_type == 'dataframe':\n transformed_data = model.transform(gdf).as_matrix()\n else:\n transformed_data = model.transform(data)\n\n original_pdist = pdist(data)\n embedded_pdist = pdist(transformed_data)\n\n # check JL lemma\n assert (np.all(((1.0 - eps) * original_pdist) <= embedded_pdist) and\n np.all(embedded_pdist <= ((1.0 + eps) * original_pdist)))\n\n\ndef test_johnson_lindenstrauss_min_dim():\n n_tests = 10000\n n_samples = np.random.randint(low=50, high=1e10, size=n_tests)\n eps_values = np.random.rand(n_tests) + 1e-17 # range (0,1)\n tests = zip(n_samples, eps_values)\n\n for n_samples, eps in tests:\n cuml_value = cuml_johnson_lindenstrauss_min_dim(n_samples, eps)\n sklearn_value = sklearn_johnson_lindenstrauss_min_dim(n_samples, eps)\n assert cuml_value == sklearn_value\n",
"# Copyright (c) 2018, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cudf\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom cuml.linear_model import Lasso as cuLasso\nfrom sklearn.linear_model import Lasso\nfrom sklearn.datasets import make_regression\nfrom sklearn.metrics import mean_squared_error\nfrom cuml.test.utils import array_equal\n\n\[email protected]('input_type', ['dataframe', 'ndarray'])\[email protected]('selection', ['cyclic', 'random'])\ndef test_lasso(input_type, selection):\n n_samples = 20\n n_feats = 5\n dtype = np.float64\n train_rows = np.int32(n_samples*0.8)\n X, y = make_regression(n_samples=n_samples, n_features=n_feats,\n n_informative=n_feats, random_state=0)\n X_test = np.array(X[train_rows:, 0:]).astype(dtype)\n y_train = np.array(y[0: train_rows, ]).astype(dtype)\n y_test = np.array(y[train_rows:, ]).astype(dtype)\n X_train = np.array(X[0: train_rows, :]).astype(dtype)\n\n sklas = Lasso(alpha=np.array([0.01]), fit_intercept=True,\n normalize=False, max_iter=1000,\n selection=selection, tol=1e-10)\n sklas.fit(X_train, y_train)\n sk_predict = sklas.predict(X_test)\n\n cu_lasso = cuLasso(alpha=np.array([0.01]), fit_intercept=True,\n normalize=False, max_iter=1000,\n selection=selection, tol=1e-10)\n\n if input_type == 'dataframe':\n X_train = pd.DataFrame(\n {'fea%d' % i: X_train[0:, i] for i in range(\n X_train.shape[1])})\n y_train = pd.DataFrame(\n {'fea0': y[0:train_rows, ]})\n X_test = pd.DataFrame(\n {'fea%d' % i: X_test[0:, i] for i in range(\n X_test.shape[1])})\n X_cudf = cudf.DataFrame.from_pandas(X_train)\n y_cudf = y_train.values\n y_cudf = y_cudf[:, 0]\n y_cudf = cudf.Series(y_cudf)\n X_cudf_test = cudf.DataFrame.from_pandas(X_test)\n cu_lasso.fit(X_cudf, y_cudf)\n cu_predict = cu_lasso.predict(X_cudf_test).to_array()\n\n else:\n cu_lasso.fit(X, y)\n cu_predict = cu_lasso.predict(X_test).to_array()\n\n error_sk = mean_squared_error(y_test, sk_predict)\n error_cu = mean_squared_error(y_test, cu_predict)\n assert array_equal(error_sk, error_cu, 1e-2, with_sign=True)\n"
] |
[
[
"numpy.asarray",
"numpy.all",
"scipy.spatial.distance.pdist",
"sklearn.random_projection.johnson_lindenstrauss_min_dim",
"numpy.random.rand",
"sklearn.datasets.samples_generator.make_blobs",
"numpy.random.randint"
],
[
"numpy.int32",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"sklearn.datasets.make_regression",
"numpy.array"
]
] |
lmasieri/iot-central-docs-samples
|
[
"90b4d204a43c249481ddd574876e685b18b8e1e8"
] |
[
"databricks/IoT Central Analysis.py"
] |
[
"# Databricks notebook source\n# MAGIC %md # IoT Central streaming to Azure Databricks\n# MAGIC \n# MAGIC This python script is an example of how [IoT Central](https://azure.microsoft.com/services/iot-central/) can stream data to [Azure Databricks](https://azure.microsoft.com/services/databricks/) using Apache Spark. \n# MAGIC \n# MAGIC When this notebook runs in a Databricks workspace, the Python script:\n# MAGIC \n# MAGIC 1. Reads the streaming measurement data from from an IoT Central application.\n# MAGIC 1. Plots averaged humidity data by device to show a smoother plot.\n# MAGIC 1. Stores the data in the cluster.\n# MAGIC 1. Displays box plots with any outliers from the stored data.\n# MAGIC \n# MAGIC ## Configuring event hub connection strings\n# MAGIC \n# MAGIC IoT Central can be set up to export data to Azure Event Hubs using the **Continuous data export** feature. This example uses a single event hub for streaming telemetry. \n# MAGIC \n# MAGIC The connection string in the following cell is for the telemetry event hub. For more information, see the how-to guide [Extend Azure IoT Central with custom analytics](https://docs.microsoft.com/azure/iot-central/howto-create-custom-analytics).\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\n\n###### Event Hub Connection string ######\ntelementryEventHubConfig = {\n 'eventhubs.connectionString' : '{your Event Hubs connection string}'\n}\n\n# COMMAND ----------\n\n# MAGIC %md ##Helper functions\n# MAGIC These helper functions manipulate the Spark [DataFrames](https://docs.azuredatabricks.net/spark/latest/dataframes-datasets/index.html).\n# MAGIC \n# MAGIC #### Removing quotations\n# MAGIC Some data comes through from the event hub with quotation marks, you want to clean this.\n\n# COMMAND ----------\n\n@udf\ndef removeQuotations(value):\n return value.replace('\"', '')\n\n# COMMAND ----------\n\n# MAGIC %md #### Getting nested items from a dictionary\n# MAGIC You need to get items from deep in a dictionary that may not exist, this function checks if items exist, and if they do, retrieves them.\n\n# COMMAND ----------\n\n#Gets from a multi-level dictionary \ndef nestedGet(dictionary, nestedKeys):\n for key in nestedKeys:\n dictionary = dictionary.get(key, None)\n if dictionary is None:\n return None\n return dictionary\n\n# COMMAND ----------\n\n# MAGIC %md #### Body property extractor\n# MAGIC This function creates a [User-defined function (UDF)](https://docs.azuredatabricks.net/spark/latest/spark-sql/udf-python.html) that extracts an item nested in the JSON body of an event hub message.\n# MAGIC \n# MAGIC For example, `bodyPropertyExtractorBuilder(['properties', 'location'])` returns a UDF that extracts `body.properties.location` if it exists.\n\n# COMMAND ----------\n\nimport json\n\ndef bodyPropertyExtractorBuilder(nestedKeys):\n def bodyPropertyExtractor(body):\n decodedBody = json.loads(body.decode(\"utf-8\"));\n return nestedGet(decodedBody, nestedKeys)\n return bodyPropertyExtractor\n\n# COMMAND ----------\n\n# MAGIC %md ## Telemetry query\n# MAGIC #### Initial query\n# MAGIC \n# MAGIC This creates a streaming DataFrame from the telemtry event hub. 
A streaming DataFrame continuously updates as more data arrives.\n\n# COMMAND ----------\n\ntelemetryDF = spark \\\n .readStream \\\n .format(\"eventhubs\") \\\n .options(**telementryEventHubConfig) \\\n .load()\n\n\n# COMMAND ----------\n\n# MAGIC %md #### Extract the required data\n# MAGIC This creates a new streaming DataFrame that contains the:\n# MAGIC - Device Id from the event hub message's system properties\n# MAGIC - Enqueued time from the event hub message's system properties\n# MAGIC - humidity from the event hub message's body\n# MAGIC \n# MAGIC The code uses the `removeQuotations` and `bodyPropertyExtractorBuilder` functions defined previously.\n\n# COMMAND ----------\n\nhumidityUdf = udf(bodyPropertyExtractorBuilder(['humidity']), FloatType())\ntelemetryDF = telemetryDF.select(\n removeQuotations(telemetryDF.systemProperties['iothub-connection-device-id']).alias('deviceId'),\n removeQuotations(telemetryDF.systemProperties['iothub-enqueuedtime']).cast(\"timestamp\").alias('enqueuedtime'),\n humidityUdf('body').alias('humidity')\n )\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Plot the telemetry\n# MAGIC \n# MAGIC The following code uses a window to calculate rolling averages by device Id.\n# MAGIC \n# MAGIC Because the example is still using a streaming DataFrame, the chart updates continuously.\n\n# COMMAND ----------\n\nsmoothTelemetryDF = telemetryDF.groupBy(\n window('enqueuedtime', \"10 minutes\", \"5 minutes\"),\n 'deviceId'\n).agg({'humidity': 'avg'})\ndisplay(smoothTelemetryDF)\n\n# COMMAND ----------\n\n# MAGIC %md ## Analyze the telemetry further \n# MAGIC \n# MAGIC To perform more complex analysis, the following cell continuously writes the streaming data to a table in the cluster. The amount of data stored will continue to grow, so in a production system you should periodically delete or archive old telemetry data.\n# MAGIC \n# MAGIC ###Write streaming data query results to a database\n# MAGIC Writes the final telemetryDF DataFrame to a [database table](https://docs.azuredatabricks.net/user-guide/tables.html) in the cluster. You could choose to write the telemetry to another storage location such as an external database or blob store.\n# MAGIC \n# MAGIC For more information, see [Streaming Data Sources and Sinks](https://docs.azuredatabricks.net/spark/latest/structured-streaming/data-sources.html).\n\n# COMMAND ----------\n\ntelemetryDF \\\n .writeStream \\\n .outputMode(\"append\") \\\n .format(\"delta\") \\\n .option(\"checkpointLocation\", \"/delta/events/_checkpoints/etl-from-json\") \\\n .table(\"telemetry\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC Wait until some streaming data has been written to storage.\n\n# COMMAND ----------\n\nfrom time import sleep\nsleep(60) # wait until some telemtry has been written to storage\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Generate box plots\n# MAGIC The format of the stored data is not suitable for using the Matplotlib [boxplot](https://matplotlib.org/gallery/statistics/boxplot_demo.html) function. It's also not possible to *pivot* streaming data - this is why the previous cell wrote the streaming data to the filesystem.\n# MAGIC \n# MAGIC The following code:\n# MAGIC 1. Generates a list of device Ids to use as column headings.\n# MAGIC 1. Loads and pivots the stored data and then converts it to a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html).\n# MAGIC 1. 
Uses Matplotlib to generate a [box plot](https://en.wikipedia.org/wiki/Box_plot)\n# MAGIC \n# MAGIC A box plot is a way to show the spread of data and any outliers. The chart shows hourly box plots for each device. You need to wait for some time to see multiple hourly plots.\n# MAGIC \n# MAGIC Note: this chart isn't based on streaming data so you need to manually update it by re-running the cell.\n\n# COMMAND ----------\n\nimport matplotlib.pyplot as plt\n\n# Get list of distinct deviceId values\ndevicelist = spark.table('telemetry').select(collect_set('deviceId').alias('deviceId')).first()['deviceId']\n\n# Pivot and convert to a pandas dataframe\npdDF = spark.table('telemetry').groupBy('enqueuedtime').pivot('deviceId').mean('humidity').orderBy('enqueuedtime').withColumn('hour', date_trunc('hour', 'enqueuedtime')).toPandas()\n\n# Use the pandas plotting function\nplt.clf()\npdDF.boxplot(column=devicelist, by=['hour'], rot=90, fontsize='medium', layout=(2,2), figsize=(20,8))\ndisplay()\n"
] |
[
[
"matplotlib.pyplot.clf"
]
] |
JsBlueCat/SACN
|
[
"19abce5ac3d8e5880c4334a2192a78b86f106701"
] |
[
"train/operations.py"
] |
[
"import torch\nimport torch.nn as nn\n\nOPS = {\n 'none': lambda C, stride, affine: Zero(stride),\n 'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),\n 'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),\n 'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine),\n 'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),\n 'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),\n 'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),\n 'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),\n 'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),\n 'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C, C, (1, 7), stride=(1, stride),\n padding=(0, 3), bias=False),\n nn.Conv2d(C, C, (7, 1), stride=(stride, 1),\n padding=(3, 0), bias=False),\n nn.BatchNorm2d(C, affine=affine)\n ),\n 'conv_5x1_1x5': lambda C, stride, affine: nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C, C, (1, 5), stride=(1, stride),\n padding=(0, 2), bias=False),\n nn.Conv2d(C, C, (5, 1), stride=(stride, 1),\n padding=(2, 0), bias=False),\n nn.BatchNorm2d(C, affine=affine)\n ),\n}\n\n\nclass ConvReLU(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ConvReLU, self).__init__()\n self.op = nn.Sequential(\n nn.Conv2d(C_in, C_out, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=False),\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass BNConvReLU(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(BNConvReLU, self).__init__()\n self.op = nn.Sequential(\n nn.BatchNorm2d(C_in, affine=affine),\n nn.Conv2d(C_in, C_out, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=False),\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass ConvBNReLU(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ConvBNReLU, self).__init__()\n self.op = nn.Sequential(\n nn.Conv2d(C_in, C_out, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass ReLUConvBN(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(C_in, C_out, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass DilConv(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(C_in, C_out, kernel_size=kernel_size,\n stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(SepConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(C_in, C_in, 
kernel_size=kernel_size,\n stride=stride, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_in, affine=affine),\n nn.ReLU(),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1,\n padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:, :, ::self.stride, ::self.stride].mul(0.)\n\n\nclass FactorizedReduce(nn.Module):\n\n def __init__(self, C_in, C_out, affine=True):\n super(FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv1 = nn.Conv2d(C_in, C_out // 2, 1,\n stride=2, padding=0, bias=False)\n self.conv2 = nn.Conv2d(C_in, C_out // 2, 1,\n stride=2, padding=0, bias=False)\n self.bn = nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)\n out = self.bn(out)\n return out\n"
] |
[
[
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
vineethbabu/mmaction2
|
[
"3f3ad9cae291c991b822cbc2ecfb88c1188e87c5",
"3f3ad9cae291c991b822cbc2ecfb88c1188e87c5"
] |
[
"mmaction/core/evaluation/accuracy.py",
"tools/deployment/publish_model.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\n\ndef confusion_matrix(y_pred, y_real, normalize=None):\n \"\"\"Compute confusion matrix.\n\n Args:\n y_pred (list[int] | np.ndarray[int]): Prediction labels.\n y_real (list[int] | np.ndarray[int]): Ground truth labels.\n normalize (str | None): Normalizes confusion matrix over the true\n (rows), predicted (columns) conditions or all the population.\n If None, confusion matrix will not be normalized. Options are\n \"true\", \"pred\", \"all\", None. Default: None.\n\n Returns:\n np.ndarray: Confusion matrix.\n \"\"\"\n if normalize not in ['true', 'pred', 'all', None]:\n raise ValueError(\"normalize must be one of {'true', 'pred', \"\n \"'all', None}\")\n\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n if y_pred.dtype == np.int32:\n y_pred = y_pred.astype(np.int64)\n if not isinstance(y_pred, np.ndarray):\n raise TypeError(\n f'y_pred must be list or np.ndarray, but got {type(y_pred)}')\n if not y_pred.dtype == np.int64:\n raise TypeError(\n f'y_pred dtype must be np.int64, but got {y_pred.dtype}')\n\n if isinstance(y_real, list):\n y_real = np.array(y_real)\n if y_real.dtype == np.int32:\n y_real = y_real.astype(np.int64)\n if not isinstance(y_real, np.ndarray):\n raise TypeError(\n f'y_real must be list or np.ndarray, but got {type(y_real)}')\n if not y_real.dtype == np.int64:\n raise TypeError(\n f'y_real dtype must be np.int64, but got {y_real.dtype}')\n\n label_set = np.unique(np.concatenate((y_pred, y_real)))\n num_labels = len(label_set)\n max_label = label_set[-1]\n label_map = np.zeros(max_label + 1, dtype=np.int64)\n for i, label in enumerate(label_set):\n label_map[label] = i\n\n y_pred_mapped = label_map[y_pred]\n y_real_mapped = label_map[y_real]\n\n confusion_mat = np.bincount(\n num_labels * y_real_mapped + y_pred_mapped,\n minlength=num_labels**2).reshape(num_labels, num_labels)\n\n with np.errstate(all='ignore'):\n if normalize == 'true':\n confusion_mat = (\n confusion_mat / confusion_mat.sum(axis=1, keepdims=True))\n elif normalize == 'pred':\n confusion_mat = (\n confusion_mat / confusion_mat.sum(axis=0, keepdims=True))\n elif normalize == 'all':\n confusion_mat = (confusion_mat / confusion_mat.sum())\n confusion_mat = np.nan_to_num(confusion_mat)\n\n return confusion_mat\n\n\ndef mean_class_accuracy(scores, labels):\n \"\"\"Calculate mean class accuracy.\n\n Args:\n scores (list[np.ndarray]): Prediction scores for each class.\n labels (list[int]): Ground truth labels.\n\n Returns:\n np.ndarray: Mean class accuracy.\n \"\"\"\n pred = np.argmax(scores, axis=1)\n cf_mat = confusion_matrix(pred, labels).astype(float)\n\n cls_cnt = cf_mat.sum(axis=1)\n cls_hit = np.diag(cf_mat)\n\n mean_class_acc = np.mean(\n [hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])\n\n return mean_class_acc\n\n\ndef top_k_classes(scores, labels, k=10, mode='accurate'):\n \"\"\"Calculate the most K accurate (inaccurate) classes.\n\n Given the prediction scores, ground truth label and top-k value,\n compute the top K accurate (inaccurate) classes.\n\n Args:\n scores (list[np.ndarray]): Prediction scores for each class.\n labels (list[int] | np.ndarray): Ground truth labels.\n k (int): Top-k values. Default: 10.\n mode (str): Comparison mode for Top-k. Options are 'accurate'\n and 'inaccurate'. 
Default: 'accurate'.\n\n Return:\n list: List of sorted (from high accuracy to low accuracy for\n 'accurate' mode, and from low accuracy to high accuracy for\n inaccurate mode) top K classes in format of (label_id,\n acc_ratio).\n \"\"\"\n assert mode in ['accurate', 'inaccurate']\n pred = np.argmax(scores, axis=1)\n cf_mat = confusion_matrix(pred, labels).astype(float)\n\n cls_cnt = cf_mat.sum(axis=1)\n cls_hit = np.diag(cf_mat)\n hit_ratio = np.array(\n [hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])\n\n if mode == 'accurate':\n max_index = np.argsort(hit_ratio)[-k:][::-1]\n max_value = hit_ratio[max_index]\n results = list(zip(max_index, max_value))\n else:\n min_index = np.argsort(hit_ratio)[:k]\n min_value = hit_ratio[min_index]\n results = list(zip(min_index, min_value))\n return results\n\n\ndef top_k_accuracy(scores, labels, topk=(1, )):\n \"\"\"Calculate top k accuracy score.\n\n Args:\n scores (list[np.ndarray]): Prediction scores for each class.\n labels (list[int]): Ground truth labels.\n topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).\n\n Returns:\n list[float]: Top k accuracy score for each k.\n \"\"\"\n res = []\n labels = np.array(labels)[:, np.newaxis]\n for k in topk:\n max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]\n match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)\n topk_acc_score = match_array.sum() / match_array.shape[0]\n res.append(topk_acc_score)\n\n return res\n\n\ndef mmit_mean_average_precision(scores, labels):\n \"\"\"Mean average precision for multi-label recognition. Used for reporting\n MMIT style mAP on Multi-Moments in Times. The difference is that this\n method calculates average-precision for each sample and averages them among\n samples.\n\n Args:\n scores (list[np.ndarray]): Prediction scores of different classes for\n each sample.\n labels (list[np.ndarray]): Ground truth many-hot vector for each\n sample.\n\n Returns:\n np.float: The MMIT style mean average precision.\n \"\"\"\n results = []\n for score, label in zip(scores, labels):\n precision, recall, _ = binary_precision_recall_curve(score, label)\n ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])\n results.append(ap)\n return np.mean(results)\n\n\ndef mean_average_precision(scores, labels):\n \"\"\"Mean average precision for multi-label recognition.\n\n Args:\n scores (list[np.ndarray]): Prediction scores of different classes for\n each sample.\n labels (list[np.ndarray]): Ground truth many-hot vector for each\n sample.\n\n Returns:\n np.float: The mean average precision.\n \"\"\"\n results = []\n scores = np.stack(scores).T\n labels = np.stack(labels).T\n\n for score, label in zip(scores, labels):\n precision, recall, _ = binary_precision_recall_curve(score, label)\n ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])\n results.append(ap)\n results = [x for x in results if not np.isnan(x)]\n if results == []:\n return np.nan\n return np.mean(results)\n\n\ndef binary_precision_recall_curve(y_score, y_true):\n \"\"\"Calculate the binary precision recall curve at step thresholds.\n\n Args:\n y_score (np.ndarray): Prediction scores for each class.\n Shape should be (num_classes, ).\n y_true (np.ndarray): Ground truth many-hot vector.\n Shape should be (num_classes, ).\n\n Returns:\n precision (np.ndarray): The precision of different thresholds.\n recall (np.ndarray): The recall of different thresholds.\n thresholds (np.ndarray): Different thresholds at which precision and\n recall are tested.\n \"\"\"\n assert 
isinstance(y_score, np.ndarray)\n assert isinstance(y_true, np.ndarray)\n assert y_score.shape == y_true.shape\n\n # make y_true a boolean vector\n y_true = (y_true == 1)\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n # There may be ties in values, therefore find the `distinct_value_inds`\n distinct_value_inds = np.where(np.diff(y_score))[0]\n threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]\n # accumulate the true positives with decreasing threshold\n tps = np.cumsum(y_true)[threshold_inds]\n fps = 1 + threshold_inds - tps\n thresholds = y_score[threshold_inds]\n\n precision = tps / (tps + fps)\n precision[np.isnan(precision)] = 0\n recall = tps / tps[-1]\n # stop when full recall attained\n # and reverse the outputs so recall is decreasing\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(last_ind, None, -1)\n\n return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]\n\n\ndef pairwise_temporal_iou(candidate_segments,\n target_segments,\n calculate_overlap_self=False):\n \"\"\"Compute intersection over union between segments.\n\n Args:\n candidate_segments (np.ndarray): 1-dim/2-dim array in format\n ``[init, end]/[m x 2:=[init, end]]``.\n target_segments (np.ndarray): 2-dim array in format\n ``[n x 2:=[init, end]]``.\n calculate_overlap_self (bool): Whether to calculate overlap_self\n (union / candidate_length) or not. Default: False.\n\n Returns:\n t_iou (np.ndarray): 1-dim array [n] /\n 2-dim array [n x m] with IoU ratio.\n t_overlap_self (np.ndarray, optional): 1-dim array [n] /\n 2-dim array [n x m] with overlap_self, returns when\n calculate_overlap_self is True.\n \"\"\"\n candidate_segments_ndim = candidate_segments.ndim\n if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:\n raise ValueError('Dimension of arguments is incorrect')\n\n if candidate_segments_ndim == 1:\n candidate_segments = candidate_segments[np.newaxis, :]\n\n n, m = target_segments.shape[0], candidate_segments.shape[0]\n t_iou = np.empty((n, m), dtype=np.float32)\n if calculate_overlap_self:\n t_overlap_self = np.empty((n, m), dtype=np.float32)\n\n for i in range(m):\n candidate_segment = candidate_segments[i, :]\n tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])\n tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])\n # Intersection including Non-negative overlap score.\n segments_intersection = (tt2 - tt1).clip(0)\n # Segment union.\n segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +\n (candidate_segment[1] - candidate_segment[0]) -\n segments_intersection)\n # Compute overlap as the ratio of the intersection\n # over union of two segments.\n t_iou[:, i] = (segments_intersection.astype(float) / segments_union)\n if calculate_overlap_self:\n candidate_length = candidate_segment[1] - candidate_segment[0]\n t_overlap_self[:, i] = (\n segments_intersection.astype(float) / candidate_length)\n\n if candidate_segments_ndim == 1:\n t_iou = np.squeeze(t_iou, axis=1)\n if calculate_overlap_self:\n if candidate_segments_ndim == 1:\n t_overlap_self = np.squeeze(t_overlap_self, axis=1)\n return t_iou, t_overlap_self\n\n return t_iou\n\n\ndef average_recall_at_avg_proposals(ground_truth,\n proposals,\n total_num_proposals,\n max_avg_proposals=None,\n temporal_iou_thresholds=np.linspace(\n 0.5, 0.95, 10)):\n \"\"\"Computes the average recall given an average number (percentile) of\n proposals per 
video.\n\n Args:\n ground_truth (dict): Dict containing the ground truth instances.\n proposals (dict): Dict containing the proposal instances.\n total_num_proposals (int): Total number of proposals in the\n proposal dict.\n max_avg_proposals (int | None): Max number of proposals for one video.\n Default: None.\n temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou\n thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.\n\n Returns:\n tuple([np.ndarray, np.ndarray, np.ndarray, float]):\n (recall, average_recall, proposals_per_video, auc)\n In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold\n at the j-th average number (percentile) of average number of\n proposals per video. The average_recall is recall averaged\n over a list of temporal_iou threshold (1D array). This is\n equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``\n is the average number of proposals per video. The auc is the area\n under ``AR@AN`` curve.\n \"\"\"\n\n total_num_videos = len(ground_truth)\n\n if not max_avg_proposals:\n max_avg_proposals = float(total_num_proposals) / total_num_videos\n\n ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)\n\n # For each video, compute temporal_iou scores among the retrieved proposals\n score_list = []\n total_num_retrieved_proposals = 0\n for video_id in ground_truth:\n # Get proposals for this video.\n proposals_video_id = proposals[video_id]\n this_video_proposals = proposals_video_id[:, :2]\n # Sort proposals by score.\n sort_idx = proposals_video_id[:, 2].argsort()[::-1]\n this_video_proposals = this_video_proposals[sort_idx, :].astype(\n np.float32)\n\n # Get ground-truth instances associated to this video.\n ground_truth_video_id = ground_truth[video_id]\n this_video_ground_truth = ground_truth_video_id[:, :2].astype(\n np.float32)\n if this_video_proposals.shape[0] == 0:\n n = this_video_ground_truth.shape[0]\n score_list.append(np.zeros((n, 1)))\n continue\n\n if this_video_proposals.ndim != 2:\n this_video_proposals = np.expand_dims(this_video_proposals, axis=0)\n if this_video_ground_truth.ndim != 2:\n this_video_ground_truth = np.expand_dims(\n this_video_ground_truth, axis=0)\n\n num_retrieved_proposals = np.minimum(\n int(this_video_proposals.shape[0] * ratio),\n this_video_proposals.shape[0])\n total_num_retrieved_proposals += num_retrieved_proposals\n this_video_proposals = this_video_proposals[:\n num_retrieved_proposals, :]\n\n # Compute temporal_iou scores.\n t_iou = pairwise_temporal_iou(this_video_proposals,\n this_video_ground_truth)\n score_list.append(t_iou)\n\n # Given that the length of the videos is really varied, we\n # compute the number of proposals in terms of a ratio of the total\n # proposals retrieved, i.e. 
average recall at a percentage of proposals\n # retrieved per video.\n\n # Computes average recall.\n pcn_list = np.arange(1, 101) / 100.0 * (\n max_avg_proposals * float(total_num_videos) /\n total_num_retrieved_proposals)\n matches = np.empty((total_num_videos, pcn_list.shape[0]))\n positives = np.empty(total_num_videos)\n recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))\n # Iterates over each temporal_iou threshold.\n for ridx, temporal_iou in enumerate(temporal_iou_thresholds):\n # Inspect positives retrieved per video at different\n # number of proposals (percentage of the total retrieved).\n for i, score in enumerate(score_list):\n # Total positives per video.\n positives[i] = score.shape[0]\n # Find proposals that satisfies minimum temporal_iou threshold.\n true_positives_temporal_iou = score >= temporal_iou\n # Get number of proposals as a percentage of total retrieved.\n pcn_proposals = np.minimum(\n (score.shape[1] * pcn_list).astype(np.int), score.shape[1])\n\n for j, num_retrieved_proposals in enumerate(pcn_proposals):\n # Compute the number of matches\n # for each percentage of the proposals\n matches[i, j] = np.count_nonzero(\n (true_positives_temporal_iou[:, :num_retrieved_proposals]\n ).sum(axis=1))\n\n # Computes recall given the set of matches per video.\n recall[ridx, :] = matches.sum(axis=0) / positives.sum()\n\n # Recall is averaged.\n avg_recall = recall.mean(axis=0)\n\n # Get the average number of proposals per video.\n proposals_per_video = pcn_list * (\n float(total_num_retrieved_proposals) / total_num_videos)\n # Get AUC\n area_under_curve = np.trapz(avg_recall, proposals_per_video)\n auc = 100. * float(area_under_curve) / proposals_per_video[-1]\n return recall, avg_recall, proposals_per_video, auc\n\n\ndef get_weighted_score(score_list, coeff_list):\n \"\"\"Get weighted score with given scores and coefficients.\n\n Given n predictions by different classifier: [score_1, score_2, ...,\n score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,\n coeff_n] (coeff_list), return weighted score: weighted_score =\n score_1 * coeff_1 + score_2 * coeff_2 + ... 
+ score_n * coeff_n\n\n Args:\n score_list (list[list[np.ndarray]]): List of list of scores, with shape\n n(number of predictions) X num_samples X num_classes\n coeff_list (list[float]): List of coefficients, with shape n.\n\n Returns:\n list[np.ndarray]: List of weighted scores.\n \"\"\"\n assert len(score_list) == len(coeff_list)\n num_samples = len(score_list[0])\n for i in range(1, len(score_list)):\n assert len(score_list[i]) == num_samples\n\n scores = np.array(score_list) # (num_coeff, num_samples, num_classes)\n coeff = np.array(coeff_list) # (num_coeff, )\n weighted_scores = list(np.dot(scores.T, coeff).T)\n return weighted_scores\n\n\ndef softmax(x, dim=1):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))\n return e_x / e_x.sum(axis=dim, keepdims=True)\n\n\ndef interpolated_precision_recall(precision, recall):\n \"\"\"Interpolated AP - VOCdevkit from VOC 2011.\n\n Args:\n precision (np.ndarray): The precision of different thresholds.\n recall (np.ndarray): The recall of different thresholds.\n\n Returns:\n float: Average precision score.\n \"\"\"\n mprecision = np.hstack([[0], precision, [0]])\n mrecall = np.hstack([[0], recall, [1]])\n for i in range(len(mprecision) - 1)[::-1]:\n mprecision[i] = max(mprecision[i], mprecision[i + 1])\n idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1\n ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx])\n return ap\n\n\ndef average_precision_at_temporal_iou(ground_truth,\n prediction,\n temporal_iou_thresholds=(np.linspace(\n 0.5, 0.95, 10))):\n \"\"\"Compute average precision (in detection task) between ground truth and\n predicted data frames. If multiple predictions match the same predicted\n segment, only the one with highest score is matched as true positive. This\n code is greatly inspired by Pascal VOC devkit.\n\n Args:\n ground_truth (dict): Dict containing the ground truth instances.\n Key: 'video_id'\n Value (np.ndarray): 1D array of 't-start' and 't-end'.\n prediction (np.ndarray): 2D array containing the information of\n proposal instances, including 'video_id', 'class_id', 't-start',\n 't-end' and 'score'.\n temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou\n thresholds. 
Default: ``np.linspace(0.5, 0.95, 10)``.\n\n Returns:\n np.ndarray: 1D array of average precision score.\n \"\"\"\n ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)\n if len(prediction) < 1:\n return ap\n\n num_gts = 0.\n lock_gt = dict()\n for key in ground_truth:\n lock_gt[key] = np.ones(\n (len(temporal_iou_thresholds), len(ground_truth[key]))) * -1\n num_gts += len(ground_truth[key])\n\n # Sort predictions by decreasing score order.\n prediction = np.array(prediction)\n scores = prediction[:, 4].astype(float)\n sort_idx = np.argsort(scores)[::-1]\n prediction = prediction[sort_idx]\n\n # Initialize true positive and false positive vectors.\n tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),\n dtype=np.int32)\n fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),\n dtype=np.int32)\n\n # Assigning true positive to truly grount truth instances.\n for idx, this_pred in enumerate(prediction):\n\n # Check if there is at least one ground truth in the video.\n if this_pred[0] in ground_truth:\n this_gt = np.array(ground_truth[this_pred[0]], dtype=float)\n else:\n fp[:, idx] = 1\n continue\n\n t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)\n # We would like to retrieve the predictions with highest t_iou score.\n t_iou_sorted_idx = t_iou.argsort()[::-1]\n for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):\n for jdx in t_iou_sorted_idx:\n if t_iou[jdx] < t_iou_threshold:\n fp[t_idx, idx] = 1\n break\n if lock_gt[this_pred[0]][t_idx, jdx] >= 0:\n continue\n # Assign as true positive after the filters above.\n tp[t_idx, idx] = 1\n lock_gt[this_pred[0]][t_idx, jdx] = idx\n break\n\n if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:\n fp[t_idx, idx] = 1\n\n tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)\n fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)\n recall_cumsum = tp_cumsum / num_gts\n\n precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)\n\n for t_idx in range(len(temporal_iou_thresholds)):\n ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],\n recall_cumsum[t_idx, :])\n\n return ap\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport platform\nimport subprocess\n\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Process a checkpoint to be published')\n parser.add_argument('in_file', help='input checkpoint filename')\n parser.add_argument('out_file', help='output checkpoint filename')\n args = parser.parse_args()\n return args\n\n\ndef process_checkpoint(in_file, out_file):\n checkpoint = torch.load(in_file, map_location='cpu')\n # remove optimizer for smaller file size\n if 'optimizer' in checkpoint:\n del checkpoint['optimizer']\n # if it is necessary to remove some sensitive data in checkpoint['meta'],\n # add the code here.\n torch.save(checkpoint, out_file)\n if platform.system() == 'Windows':\n sha = subprocess.check_output(\n ['certutil', '-hashfile', out_file, 'SHA256'])\n sha = str(sha).split('\\\\r\\\\n')[1]\n else:\n sha = subprocess.check_output(['sha256sum', out_file]).decode()\n if out_file.endswith('.pth'):\n out_file_name = out_file[:-4]\n else:\n out_file_name = out_file\n final_file = out_file_name + f'-{sha[:8]}.pth'\n os.rename(out_file, final_file)\n\n\ndef main():\n args = parse_args()\n process_checkpoint(args.in_file, args.out_file)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.expand_dims",
"numpy.minimum",
"numpy.linspace",
"numpy.squeeze",
"numpy.nan_to_num",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.where",
"numpy.trapz",
"numpy.hstack",
"numpy.arange",
"numpy.stack",
"numpy.logical_or.reduce",
"numpy.argmax",
"numpy.diff",
"numpy.zeros",
"numpy.isnan",
"numpy.errstate",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.bincount",
"numpy.empty"
],
[
"torch.save",
"torch.load"
]
] |
noriyukipy/gptchat
|
[
"15febcc69cf79ffbca50bd8897447b5804bcef54"
] |
[
"gptchat/callback.py"
] |
[
"import tensorflow.keras as keras\n\n\nclass WarmupScheduler(keras.callbacks.Callback):\n def __init__(self, warmup_steps, learning_rate):\n super().__init__()\n\n self._warmup_steps = warmup_steps\n self._learning_rate = learning_rate\n\n # The argument passed to on_train_batch_begin\n # is resetted every epoch.\n # self._total_steps is used to keep total step\n self._total_steps = 0\n\n def on_train_batch_begin(self, step, logs=None):\n self._total_steps += 1\n step = self._total_steps\n\n if step > self._warmup_steps:\n return\n\n # Get the current learning rate from model's optimizer.\n lr = float(keras.backend.get_value(self.model.optimizer.lr))\n # Call schedule function to get the scheduled learning rate.\n scheduled_lr = self._learning_rate * (step / self._warmup_steps)\n # Set the value back to the optimizer before this epoch starts\n keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)\n # print('\\nStep {}: lr is schedulerd {:.4e} -> {:.4e}]'.format(step, lr, float(tf.keras.backend.get_value(self.model.optimizer.lr))))\n\n\nclass TransformersCheckpoint(keras.callbacks.Callback):\n def __init__(self, model, save_dir):\n super().__init__()\n\n self._model = model\n self._save_dir = save_dir\n\n def on_epoch_end(self, epoch, logs=None):\n save_dir = self._save_dir.format(epoch=epoch)\n print(f\"Save transformers model in {save_dir}\")\n self._model.save_pretrained(save_dir)\n"
] |
[
[
"tensorflow.keras.backend.get_value",
"tensorflow.keras.backend.set_value"
]
] |
GJBoth/DeePyMoD_torch
|
[
"b4b90080f4f9fea8fdf4426e0708e807b193242f",
"b4b90080f4f9fea8fdf4426e0708e807b193242f"
] |
[
"paper/figures/3/run_deepmod.py",
"src/DeePyMoD_SBL/deepymod_torch/network.py"
] |
[
"# Imports\nimport numpy as np\nimport torch\n\nfrom phimal_utilities.data import Dataset\nfrom phimal_utilities.data.burgers import BurgersDelta\nfrom DeePyMoD_SBL.deepymod_torch.library_functions import library_1D_in\nfrom DeePyMoD_SBL.deepymod_torch.DeepMod import DeepMod\n\nimport time\nfrom DeePyMoD_SBL.deepymod_torch.output import Tensorboard, progress\nfrom DeePyMoD_SBL.deepymod_torch.losses import reg_loss, mse_loss, l1_loss\nfrom DeePyMoD_SBL.deepymod_torch.sparsity import scaling, threshold\nfrom numpy import pi\n\n\n# Defining training function\ndef train(model, data, target, optimizer, max_iterations, loss_func_args, log_dir=None):\n start_time = time.time()\n number_of_terms = [coeff_vec.shape[0] for coeff_vec in model(data)[3]]\n board = Tensorboard(number_of_terms, log_dir)\n \n # Training\n print('| Iteration | Progress | Time remaining | Cost | MSE | Reg | LL |')\n for iteration in torch.arange(0, max_iterations + 1):\n # Calculating prediction and library and scaling\n prediction, time_deriv_list, sparse_theta_list, coeff_vector_list, theta = model(data)\n coeff_vector_scaled_list = scaling(coeff_vector_list, sparse_theta_list, time_deriv_list) \n \n # Calculating loss\n loss_mse = mse_loss(prediction, target)\n loss_reg = reg_loss(time_deriv_list, sparse_theta_list, coeff_vector_list)\n loss = torch.sum(loss_mse) + torch.sum(loss_reg)\n \n # Writing\n if iteration % 100 == 0:\n # Write progress to command line\n progress(iteration, start_time, max_iterations, loss.item(), torch.sum(loss_mse).item(), torch.sum(loss_reg).item(), torch.sum(loss_reg).item())\n \n lstsq_solution = torch.inverse(theta.T @ theta) @ theta.T @ time_deriv_list[0]\n \n # Calculate error for theta\n theta_true = loss_func_args['library']\n dt_true = loss_func_args['time_deriv']\n mae_library = torch.mean(torch.abs(theta - theta_true), dim=0)\n mae_dt = torch.mean(torch.abs(dt_true - time_deriv_list[0]), dim=0)\n \n # Write to tensorboard\n board.write(iteration, loss, loss_mse, loss_reg, loss_reg, coeff_vector_list, coeff_vector_scaled_list, lstsq_solution=lstsq_solution.squeeze(), mae_library=mae_library, mae_time_deriv=mae_dt)\n\n # Optimizer step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n board.close()\n\n# Settings and parameters\nif torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nnp.random.seed(42)\ntorch.manual_seed(42)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nv = 0.1\nA = 1.0\n\n# Making grid\nx = np.linspace(-3, 4, 100)\nt = np.linspace(0.5, 5.0, 50)\nx_grid, t_grid = np.meshgrid(x, t, indexing='ij')\n\n# Making data\ndataset = Dataset(BurgersDelta, v=v, A=A)\nconfig = {'n_in': 2, 'hidden_dims': [30, 30, 30, 30, 30], 'n_out': 1, 'library_function':library_1D_in, 'library_args':{'poly_order':2, 'diff_order': 3}}\nn_runs = 5\n\nfor run_idx in np.arange(n_runs):\n X_train, y_train, rand_idx = dataset.create_dataset(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1), n_samples=1000, noise=0.1, random=True, return_idx=True)\n \n theta = dataset.library(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1), poly_order=2, deriv_order=3)[rand_idx, :]\n dt = dataset.time_deriv(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1))[rand_idx, :]\n \n model = DeepMod(**config)\n optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.999), amsgrad=True)\n train(model, X_train, y_train, optimizer, 100000, loss_func_args={'library':torch.tensor(theta) ,'time_deriv': torch.tensor(dt)}, log_dir = 
f'runs/deepmod_run_{run_idx}')\n torch.save(model.state_dict(), f'data/deepmod_run_{run_idx}.pt')",
"import torch\nimport torch.nn as nn\n\n\nclass Library(nn.Module):\n def __init__(self, library_func, library_args={}):\n super().__init__()\n self.library_func = library_func\n self.library_args = library_args\n\n def forward(self, input):\n time_deriv_list, theta = self.library_func(input, **self.library_args)\n return time_deriv_list, theta\n\n\nclass Fitting(nn.Module):\n def __init__(self, n_terms, n_out):\n super().__init__()\n self.coeff_vector = nn.ParameterList([torch.nn.Parameter(torch.rand((n_terms, 1), dtype=torch.float32)) for _ in torch.arange(n_out)])\n self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]\n\n def forward(self, input):\n thetas, time_derivs = input\n sparse_thetas = self.apply_mask(thetas)\n self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)\n return sparse_thetas, self.coeff_vector\n\n def apply_mask(self, theta):\n sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]\n return sparse_theta\n \n def fit_coefficient(self, thetas, time_derivs):\n return self.coeff_vector\n\n \nclass FittingDynamic(nn.Module):\n def __init__(self, n_terms, n_out):\n super().__init__()\n self.coeff_vector = [torch.rand((n_terms, 1), dtype=torch.float32) for _ in torch.arange(n_out)] # initialize randomly cause otherwise tensorboard will complain\n self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]\n\n def forward(self, input):\n thetas, time_derivs = input\n sparse_thetas = self.apply_mask(thetas)\n self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)\n return sparse_thetas, self.coeff_vector\n\n def apply_mask(self, theta):\n sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]\n return sparse_theta\n \n def fit_coefficient(self, thetas, time_derivs):\n #opt_coeff = [torch.inverse(theta.T @ theta) @ (theta.T @ dt) for theta, dt in zip(thetas, time_derivs)] # normal equation for least squares\n opt_coeff = []\n for theta, dt in zip(thetas, time_derivs):\n norm = torch.norm(theta, dim=0, keepdim=True)\n Q, R = torch.qr(theta / norm)\n opt_coeff.append(torch.inverse(R) @ Q.T @ dt / norm.T)\n \n #U, S, V = torch.svd(R)\n #print(torch.max(S) / torch.min(S))\n \n return opt_coeff\n"
] |
[
[
"torch.set_default_tensor_type",
"torch.abs",
"numpy.random.seed",
"numpy.linspace",
"torch.manual_seed",
"numpy.arange",
"torch.sum",
"torch.tensor",
"torch.inverse",
"torch.cuda.is_available",
"torch.arange",
"numpy.meshgrid"
],
[
"torch.norm",
"torch.ones",
"torch.inverse",
"torch.qr",
"torch.rand",
"torch.arange"
]
] |
kalingibbons/artifact
|
[
"fbcbd277e0d48abb3f2f55f80c81ca0b4cb7d090"
] |
[
"notebooks/04-kg-hyperparameter-tuning.py"
] |
[
"# %% [markdown]\n# # Comprehensive Exam\n#\n# ## Coding Artifact\n#\n# Kalin Gibbons\n#\n# Nov 20, 2020\n#\n# > Note: A hyperparameter is a numerical or other measurable factor\n# responsible for some aspect of training a machine learning model, whose value\n# cannot be estimated from the data, unlike regular parameters which represent\n# inherent properties of the natural processes which generated data.\n#\n# ## Hyperparameter Optimization\n#\n# There are several python packages with automatic hyperparameter selection\n# algorithms. A relatively recent contribution which I find particularly easy\n# to use is [optuna](https://optuna.org/), which is detailed in this\n# [2019 paper](https://arxiv.org/abs/1907.10902). Optuna allows the user to\n# suggest ranges of values for parameters of various types, then utilizes a\n# parameter sampling algorithms to find an optimal set of hyperparameters. Some\n# of the sampling schemes available are:\n#\n# * Grid Search\n# * Random\n# * Bayesian\n# * Evolutionary\n#\n# While the parameter suggestion schemes available are:\n#\n# * Integers\n# * Linear step\n# * Logarithmic step\n# * Floats\n# * Logarithmic\n# * Uniform\n# * Categorical\n# * List\n#\n# This notebook uses Optuna to implement hyperparameter tuning on a number of\n# ensemble algorithms.\n#\n# ## Imports\n\n# %%\nimport os\nimport sys\nimport math\nimport logging\nfrom pathlib import Path\n\nfrom IPython.display import display, clear_output\nfrom colorama import Fore, Style\nimport numpy as np\nimport scipy as sp\nimport scipy.io as spio\nimport sklearn\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\n\nimport sklearn\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import (\n AdaBoostRegressor,\n GradientBoostingRegressor,\n RandomForestRegressor,\n)\nfrom sklearn.linear_model import LinearRegression, Ridge\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\nfrom tqdm.auto import tqdm\n\n# !%load_ext autoreload\n# !%autoreload 2\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n# !%matplotlib inline\n# !%config InlineBackend.figure_format = 'retina'\n\n# import seaborn as sns\nimport pandas as pd\n\nimport optuna\nfrom optuna.visualization import plot_optimization_history\n\nimport artifact\nfrom artifact.datasets import load_tkr, tkr_group_lut\nfrom artifact.helpers import RegressionProfile, REGRESSION_PROFILE_PATH\n\n\n# %%\n\nplt.rcParams[\"figure.figsize\"] = (9, 5.5)\nmpl.rcParams[\"mathtext.fontset\"] = \"stix\"\nmpl.rcParams[\"font.size\"] = 14\nmpl.rcParams[\"font.family\"] = \"Times New Roman\"\n\n# sns.set_context(\"poster\")\n# sns.set(rc={'figure.figsize': (16, 9.)})\n# sns.set_style(\"whitegrid\")\n\npd.set_option(\"display.max_rows\", 120)\npd.set_option(\"display.max_columns\", 120)\n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n# %% [markdown]\n# Next, we'll select a functional group to examine, and only load the necessary\n# data.\n# ### Functional group selection\n\n# %%\nfunc_groups = list(tkr_group_lut.keys())\nfunc_groups\n\n\n# %%\ngroup = \"joint_loads\"\n\n# %% [markdown]\n# ### Loading the data\n#\n# We'll load a subset of the data containing the responses making up the chosen\n# functional group.\n\n# %%\nshared_kwargs = dict(results_reader=load_tkr, functional_groups=group)\ntkr_train = artifact.Results(**shared_kwargs, 
subset=\"train\")\ntkr_test = artifact.Results(**shared_kwargs, subset=\"test\")\ndisplay(tkr_train.response_names[1:])\n\nreg_prof = RegressionProfile(load_path=REGRESSION_PROFILE_PATH)\nreg_prof.describe(group)\n\n# %% [markdown]\n# ### Creating the optimization study\n#\n# First we must define an objective function, which suggests the ranges of\n# hyperparameters to be sampled. We can use switch-cases to optimize the machine\n# learning algorithm itself, in addition to the hyperparameters.\n\n# %%\nlearners = (\n # GradientBoostingRegressor(),\n # RandomForestRegressor(),\n # AdaBoostRegressor(DecisionTreeRegressor()),\n # AdaBoostRegressor(LinearRegression()),\n DecisionTreeRegressor(),\n Ridge(),\n # AdaBoostRegressor()\n)\n\n\ndef objective(trial, train, test, regressors):\n reg_strs = [r.__repr__() for r in regressors]\n regressor_name = trial.suggest_categorical(\"classifier\", reg_strs)\n\n if regressor_name == \"GradientBoostingRegressor()\":\n # learner_obj = GradientBoostingRegressor()\n pass\n\n elif regressor_name == \"RandomForestRegressor()\":\n pass\n\n elif regressor_name == \"AdaBoostRegressor(base_estimator=DecisionTreeRegressor())\":\n criterion = trial.suggest_categorical(\n \"criterion\", [\"mse\", \"friedman_mse\", \"mae\", \"poisson\"]\n )\n splitter = trial.suggest_categorical(\"splitter\", [\"best\", \"random\"])\n max_depth = trial.suggest_categorical(\"max_depth\", [3, 4, 5])\n min_samples_split = trial.suggest_categorical(\n \"min_samples_split\",\n [\n 2,\n ],\n )\n min_samples_leaf = trial.suggest_uniform(\"min_samples_leaf\", 0, 0.5)\n estimator = DecisionTreeRegressor(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n )\n\n loss = trial.suggest_categorical(\"loss\", [\"linear\", \"square\", \"exponential\"])\n n_estimators = trial.suggest_categorical(\"n_estimators\", [100])\n learner_obj = AdaBoostRegressor(estimator, n_estimators=n_estimators, loss=loss)\n cv = 7\n\n elif regressor_name == \"AdaBoostRegressor(base_estimator=LinearRegression())\":\n loss = trial.suggest_categorical(\"loss\", [\"linear\", \"square\", \"exponential\"])\n n_estimators = trial.suggest_categorical(\"n_estimators\", [100])\n learner_obj = AdaBoostRegressor(\n LinearRegression(), n_estimators=n_estimators, loss=loss\n )\n cv = 7\n\n elif regressor_name == \"DecisionTreeRegressor()\":\n criterion = trial.suggest_categorical(\n \"criterion\", [\"mse\", \"friedman_mse\", \"mae\", \"poisson\"]\n )\n splitter = trial.suggest_categorical(\"splitter\", [\"best\", \"random\"])\n max_depth = trial.suggest_categorical(\"max_depth\", [3, 4, 5])\n min_samples_split = trial.suggest_categorical(\n \"min_samples_split\",\n [\n 2,\n ],\n )\n min_samples_leaf = trial.suggest_uniform(\"min_samples_leaf\", 0, 0.5)\n learner_obj = DecisionTreeRegressor(\n criterion=criterion,\n splitter=splitter,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n )\n cv = 7\n\n elif regressor_name == \"Ridge()\":\n # alpha = trial.suggest_loguniform('alpha', 1e-5, 10)\n alpha = trial.suggest_uniform(\"alpha\", 4, 6)\n learner_obj = Ridge(alpha=alpha)\n cv = 7\n\n elif regressor_name == \"AdaBoostRegressorj()\":\n pass\n\n else:\n pass\n\n regressor = artifact.Regressor(train, test, learner_obj, scaler=StandardScaler())\n scores = regressor.cross_val_score(n_jobs=-1, cv=cv)\n\n return scores.mean() * 100\n\n\n# %% [markdown]\n# ### Running the optimization\n#\n# Optuna will 
sample the parameters automatically, for a maximum number of trials\n# specified.\n\n# %%\nstudy = optuna.create_study(direction=\"minimize\")\nstudy.optimize(lambda t: objective(t, tkr_train, tkr_test, learners), n_trials=50)\n\n# %%\nplot_optimization_history(study).show()\nprint(study.best_trial)\nprint(\n Fore.YELLOW + f\"\\nBest trial\\n RMSE% = {study.best_value} \\n {study.best_params}\"\n)\nprint(Style.RESET_ALL)\n\n# %% [markdown]\n# ### Plotting the results from the optimization\n#\n# We can assign the hyperparameters selected by optuna, and plot the resulting joint mechanics.\n\n# %%\nlearner_strs = [lrn.__repr__() for lrn in learners]\nlearner_dict = dict(zip(learner_strs, learners))\nlearner_kwargs = study.best_params.copy()\nlearner = learner_dict[learner_kwargs[\"classifier\"]]\nlearner_kwargs.pop(\"classifier\")\nlearner.set_params(**learner_kwargs)\n\n\n# %%\nlrn_name = type(learner).__name__\ntry:\n lrn_name = \"-\".join((lrn_name, type(learner.base_estimator).__name__))\nexcept AttributeError:\n pass\n\ntop_fig_dir = Path.cwd().parent / \"models\" / \"predictions\"\nsave_dir = top_fig_dir / group / lrn_name\nn_rows, n_cols = 4, 3\ntim = tkr_train.response[\"time\"][0]\nscaler = StandardScaler()\nregr = artifact.Regressor(tkr_train, tkr_test, learner, scaler=scaler)\nfor resp_name in tkr_train.response_names:\n if resp_name == \"time\":\n continue\n artifact.create_plots(n_rows, n_cols, regr, resp_name, save_dir)\n clear_output(wait=True)\n\n\n# %%\nview = artifact.plotting.ImageViewer(top_fig_dir)\nview.show()\n"
] |
[
[
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.LinearRegression",
"pandas.set_option",
"sklearn.preprocessing.StandardScaler",
"sklearn.ensemble.AdaBoostRegressor"
]
] |
michael-lazar/playscii
|
[
"dcefdd1deef9bdb3517c39d0129e948d238d4978"
] |
[
"framebuffer.py"
] |
[
"import numpy as np\nfrom OpenGL import GL\n\n\nclass Framebuffer:\n \n start_crt_enabled = False\n disable_crt = False\n clear_color = (0, 0, 0, 1)\n # declared as an option here in case people want to sub their own via CFG\n crt_fragment_shader_filename = 'framebuffer_f_crt.glsl'\n \n def __init__(self, app, width=None, height=None):\n self.app = app\n self.width, self.height = width or self.app.window_width, height or self.app.window_height\n # bind vao before compiling shaders\n if self.app.use_vao:\n self.vao = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.vao)\n self.vbo = GL.glGenBuffers(1)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)\n fb_verts = np.array([-1, -1, 1, -1, -1, 1, 1, 1], dtype=np.float32)\n GL.glBufferData(GL.GL_ARRAY_BUFFER, fb_verts.nbytes, fb_verts,\n GL.GL_STATIC_DRAW)\n # texture, depth buffer, framebuffer\n self.texture = GL.glGenTextures(1)\n self.depth_buffer = GL.glGenRenderbuffers(1)\n self.framebuffer = GL.glGenFramebuffers(1)\n self.setup_texture_and_buffers()\n # shaders\n self.plain_shader = self.app.sl.new_shader('framebuffer_v.glsl', 'framebuffer_f.glsl')\n if not self.disable_crt:\n self.crt_shader = self.app.sl.new_shader('framebuffer_v.glsl', self.crt_fragment_shader_filename)\n self.crt = self.get_crt_enabled()\n # shader uniforms and attributes\n self.plain_tex_uniform = self.plain_shader.get_uniform_location('fbo_texture')\n self.plain_attrib = self.plain_shader.get_attrib_location('v_coord')\n GL.glEnableVertexAttribArray(self.plain_attrib)\n GL.glVertexAttribPointer(self.plain_attrib, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n if not self.disable_crt:\n self.crt_tex_uniform = self.crt_shader.get_uniform_location('fbo_texture')\n self.crt_time_uniform = self.crt_shader.get_uniform_location('elapsed_time')\n self.crt_res_uniform = self.crt_shader.get_uniform_location('resolution')\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n if self.app.use_vao:\n GL.glBindVertexArray(0)\n \n def get_crt_enabled(self):\n return self.disable_crt or self.start_crt_enabled\n \n def setup_texture_and_buffers(self):\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)\n GL.glTexParameterf(GL.GL_TEXTURE_2D,\n GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D,\n GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D,\n GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)\n GL.glTexParameterf(GL.GL_TEXTURE_2D,\n GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)\n GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA,\n self.width, self.height, 0,\n GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)\n GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.depth_buffer)\n GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT16,\n self.width, self.height)\n GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, 0)\n GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.framebuffer)\n GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0,\n GL.GL_TEXTURE_2D, self.texture, 0)\n GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT,\n GL.GL_RENDERBUFFER, self.depth_buffer)\n GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)\n \n def resize(self, new_width, new_height):\n self.width, self.height = new_width, new_height\n self.setup_texture_and_buffers()\n \n def toggle_crt(self):\n self.crt = not self.crt\n \n def destroy(self):\n if self.app.use_vao:\n GL.glDeleteVertexArrays(1, [self.vao])\n GL.glDeleteBuffers(1, [self.vbo])\n GL.glDeleteRenderbuffers(1, [self.depth_buffer])\n GL.glDeleteTextures([self.texture])\n GL.glDeleteFramebuffers(1, 
[self.framebuffer])\n \n def render(self):\n if self.crt and not self.disable_crt:\n GL.glUseProgram(self.crt_shader.program)\n GL.glUniform1i(self.crt_tex_uniform, 0)\n GL.glUniform2f(self.crt_res_uniform, self.width, self.height)\n GL.glUniform1f(self.crt_time_uniform, self.app.get_elapsed_time())\n else:\n GL.glUseProgram(self.plain_shader.program)\n GL.glUniform1i(self.plain_tex_uniform, 0)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)\n GL.glClearColor(*self.clear_color)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n # VAO vs non-VAO paths\n if self.app.use_vao:\n GL.glBindVertexArray(self.vao)\n else:\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)\n GL.glVertexAttribPointer(self.plain_attrib, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n GL.glEnableVertexAttribArray(self.plain_attrib)\n GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)\n if self.app.use_vao:\n GL.glBindVertexArray(0)\n GL.glUseProgram(0)\n\n\nclass ExportFramebuffer(Framebuffer):\n clear_color = (0, 0, 0, 0)\n def get_crt_enabled(self): return True\n\n\nclass ExportFramebufferNoCRT(Framebuffer):\n clear_color = (0, 0, 0, 0)\n def get_crt_enabled(self): return False\n"
] |
[
[
"numpy.array"
]
] |
tkeech1/aws_ml
|
[
"512fd8c8770cbf5128ce9e0c03655c3d3021776b"
] |
[
"code/python_data_science_handbook/mprun_demo.py"
] |
[
"\nimport numpy as np\nimport pandas as pd\n\ndef sum_of_lists():\n #a = [1] * (10 ** 6)\n #b = [2] * (2 * 10 ** 7)\n #del b\n a = np.ones((2 ** 30), dtype=np.uint8)\n #df = pd.read_csv(\"../../data/nyc_taxi.csv\")\n #df2 = df.dropna()\n #df3 = df2.set_index('timestamp')\n test = np.ones((2**8), dtype=np.uint8)\n d = a.copy()\n return d\n"
] |
[
[
"numpy.ones"
]
] |
KoksiHub/Methods-For-SHREC-2021-Protein-Retrieval-and-Classification-Datasets
|
[
"c0565ee5290ce658e03b7770fdfb798dd619c106"
] |
[
"method HAPPS for Geometry Data/HAPPS_3Dretrievalmethod.py"
] |
[
"\n'''\nThis Script Can Run Alone.\n\nAuthor: Ekpo Otu (e-mail: eko @aber.ac.uk)\n\n\nShape Retrieval Contest (SHREC 2021 Protein Shape Benchmark Dataset): Track 3: Retrieval and Classification of Protein Surfaces Equiped with Physical and Chemical Properties\nDATASET: Geometry (i.e. 3D triangular Meshes)\nTRAININD DATASET: The training set countains 3,585 models. Divided into 144 Classes.\nTESTING DATASET: The test set contains 1,543 models.\n'''\n\n\n#Import the needed library.\nimport utilities01 as fnc\nimport numpy as np\nfrom numpy.core.umath_tests import inner1d\nnp.set_printoptions(suppress = True) #Suppress the printing of numbers in Scientific Notation or Exponential Form.\n\nimport matplotlib.pyplot as plt\nimport open3d\nimport trimesh\nfrom sklearn import preprocessing\n\nimport ntpath\nimport gc\n\n# ------------------------------------------------------------------------------------------------- #\n# TRAINING dataset, in the Geometry category (i.e. .OFF mesh files)\nfilepath = \"../dataset/1.off\" \nfilename = \"1.off\" \n\n# Where to save outputs\noutputdir = \"../outputdir/\"\n\n# Number of Random Points to sample from 3D Triangular Mesh\nN = 3500 #4500\n\n# Number of Bins to use for the multi-dimensional histogram\nnBins = 8\n\n# Note, the LARGER the value of 'r', the more the number of points in the 'Local Surface Patch(s)' extracted.\n# Example if 'r' = 0.17, about 26 points are obtained per LSP, and when 'r'= 0.27, about 65 points are obtained for the same LSP, instead.\nr = 0.50 #0.40\nvs = 0.30 \n\n# ------------------------------------------------------------------------------------------------- #\n# Load-in 3D Model or Shape.\nmesh = trimesh.load_mesh(filepath)\n\n# Sample N points from the surface of the MESH - Gives 'pointCloud'.\n# Use trimesh's native Function - 'trimesh.sample.sample_surface(mesh, N)' to Generate N-Randomly Sampled points = 'PointCloud'\nPs = trimesh.sample.sample_surface(mesh, N)[0]\n\n# Compute Ns for Ps\nPs, Ns = fnc.Ps_vs_Ns(Ps)\n# ------------------------------------------------------------------------------------------------- #\n\n\n# Function to compute HAPPS (6D-APPFD + HoGD)\ndef compute_HAPPS(Ps, Ns, filename, outputdir, N, r, vs, nBins):\n\t''' \n\tINPUT: \n\ti. Ps: N x 3 array, PointsCloud for a single 3D model.\n\n\tii. Ns: Normal Vectors correspoinding to every points in the Ps.\n\n\tiii. filename: Specific filename of a single 3D model to be read and processed. \n\t\tExample: cat4.off\n\n\tiv. outputdir: Path/directory where output data/values from this function should be saved to. \n\t\tExample: \"c:/outputdir/\"\n\n\tv. N - Number 'Random'/'Uniform' Samples from 3D Triangular Mesh (i.e filename.obj). Default N = 4500\n\n\tvi. r (Float: Default = 0.27): Radius param, used by r-nn search to determine the size of Local Surface Patch (LSP) or Region. \n\n\tvii. nBins = 7 or 8 #Number of bins for the multi-dimensional histogram of the Features. Default = 4.\n\n\tviii. vs(Float, Default = 0.15): Parameter to be used by the Voxel Down-Sampling function of Open3D.\n\n\tOUTPUT:\n\ti. 
HAPPS = [...]: which becomes the final SD for the given 3D model for a single 3D input model.\n\tWe have scaled our input model or 'Ps' before Features Extraction - such that its RMS distance from origin = 1.\n\n\tAuthor: Ekpo Otu ([email protected])\n\t'''\n\tbasename = ntpath.basename(filename)[:-4] \n\tpcd.points = open3d.Vector3dVector(Ps)\n\tdsc = fnc.ds_Open3d(pcd, vs)\n\tprint(\"\\nOUTPUT: Downsampled Cloud Size:\\t\", len(dsc))\n\n\tadsc, adscn = fnc.getActual(dsc, Ps, Ns)\n\taccummulated_final_appFeats = []\n\tfor pnt in range(0, len(adsc)):\n\t\tip = adsc[pnt]\n\t\tipn = adscn[pnt]\n\t\tnn, nNs = fnc.rnn_normals_skl(Ps, Ns, ip, r, leafSize = 30)\n\t\tpatchCentre = np.mean(nn, axis = 0)\n\t\tlocation = ip - patchCentre\n\t\tlsp = fnc.gsp(nn, nNs) \n\t\tlsp_pairs = fnc.gpe(lsp, comb = 2) \n\t\tp1 = lsp_pairs[:, 0, 0, :] \n\t\tp2 = lsp_pairs[:, 1, 0, :] \n\t\tn1 = lsp_pairs[:, 0, 1, :] \n\t\tn2 = lsp_pairs[:, 1, 1, :]\n\t\t\n\t\tp2_p1 = lsp_pairs[:, 1, 0, :] - lsp_pairs[:, 0, 0, :] \n\t\tp1_p2 = lsp_pairs[:, 0, 0, :] - lsp_pairs[:, 1, 0, :] \n\t\t\n\t\tlhs = abs(np.einsum('ij,ij->i', lsp_pairs[:, 0, 1, :], (lsp_pairs[:, 1, 0, :] - lsp_pairs[:, 0, 0, :]))) #Left-Hand-Side\n\t\tlhs[np.isnan(lhs)] = 0. \n\t\trhs = abs(np.einsum('ij,ij->i', lsp_pairs[:, 1, 1, :], (lsp_pairs[:, 1, 0, :] - lsp_pairs[:, 0, 0, :]))) #Right-Hand-Side\n\t\trhs[np.isnan(rhs)] = 0. \n \n\t\tvecs1 = p1 - patchCentre\n\t\tvecs2 = p1 - location\n\t\t\n\t\tlhs_angles1 = fnc.angrw(p1_p2, vecs1) \n\t\tlhs_angles2 = fnc.angrw(p1_p2, vecs2)\n\t\t\n\t\tcrossP1 = np.cross(p2_p1, n1)\n\t\tcrossP1[np.isnan(crossP1)] = 0.\n\t\tV1 = fnc.div0(crossP1, np.sqrt(inner1d(crossP1, crossP1))[:, None])\n\t \n\t\tW1 = np.cross(n1, V1)\n\t\tW1[np.isnan(W1)] = 0.\n\t\tx = np.einsum('ij,ij->i', W1, lsp_pairs[:, 1, 1, :]) \n\t\tx[np.isnan(x)] = 0. \n\t\ty = np.einsum('ij,ij->i', n1, lsp_pairs[:, 1, 1, :]) \n\t\ty[np.isnan(y)] = 0. 
\n\t\talpha1 = np.arctan2(x, y) \n\t\tbeta1 = np.einsum('ij,ij->i', V1, lsp_pairs[:, 1, 1, :]) \n\t\t \n\t\tnormedP1 = fnc.div0(p2_p1, np.sqrt(inner1d(p2_p1, p2_p1))[:, None]) \n\t\tgamma1 = np.einsum('ij,ij->i', n1, normedP1)\n\t\trheo1 = np.sqrt(inner1d(p2_p1, p2_p1))\n\t\t\n\t\trppf_lhs = np.column_stack((lhs_angles1, lhs_angles2, alpha1, beta1, gamma1, rheo1))\n\t\tindx = np.asarray(np.nonzero(lhs <= rhs))\n\t\tfinal_rppf_lhs = np.squeeze(rppf_lhs[indx], axis = 0) \n\t\t\n\t\tvecs1x = p2 - patchCentre\n\t\tvecs2x = p2 - location\n\t\trhs_angles1 = fnc.angrw(p2_p1, vecs1x)\n\t\trhs_angles2 = fnc.angrw(p2_p1, vecs2x)\n\t\tcrossP2 = np.cross(p1_p2, n2)\n\t\tcrossP2[np.isnan(crossP2)] = 0.\n\t\t\n\t\tV2 = fnc.div0(crossP2, np.sqrt(inner1d(crossP2, crossP2))[:, None])\n\t\tW2 = np.cross(n2, V2)\n\t\tW2[np.isnan(W2)] = 0.\n\t\tx2 = np.einsum('ij,ij->i', W2, lsp_pairs[:, 0, 1, :])\n\t\tx2[np.isnan(x2)] = 0.\n\t\ty2 = np.einsum('ij,ij->i', n2, lsp_pairs[:, 0, 1, :])\n\t\ty2[np.isnan(y2)] = 0.\n\t\t\n\t\talpha2 = np.arctan2(x2, y2)\n\t\tbeta2 = np.einsum('ij,ij->i', V2, lsp_pairs[:, 0, 1, :])\n\t\tnormedP2 = fnc.div0(p1_p2, np.sqrt(inner1d(p1_p2, p1_p2))[:, None])\n\t\tgamma2 = np.einsum('ij,ij->i', n2, normedP2)\n\t\trheo2 = np.sqrt(inner1d(p1_p2, p1_p2))\n\t\t\n\t\trppf_rhs = np.column_stack((rhs_angles1, rhs_angles2, alpha2, beta2, gamma2, rheo2))\n\t\tindxx = np.asarray(np.nonzero(lhs > rhs))\n\t\tfinal_rppf_rhs = np.squeeze(rppf_rhs[indxx], axis = 0)\n\n\t\tfull_final_fppf = np.vstack((final_rppf_lhs, final_rppf_rhs))\n\t\t\n\t\tcolumns_1to5 = preprocessing.minmax_scale(full_final_fppf[:, 0:5])\n\t\tcolumn_6 = full_final_fppf[:, 5]\n\t\tnormalizedfeats = np.column_stack((columns_1to5, column_6))\n\t\t\n\t\taccummulated_final_appFeats.append(normalizedfeats)\n\taccummulated_final_appFeats = np.vstack(accummulated_final_appFeats)\n\tappfd = fnc.multi_dim_hist(accummulated_final_appFeats, nBins) #APPFD - Augmented Point-Pairs Features Descriptors\n\t\n\t#Compute HoGD\n\tct = np.mean(Ps, axis = 0)\n\tdist = np.sqrt(np.sum((ct - Ps)**2,axis = 1))\n\tldata = Ps.shape[0]\n\tbins2 = 65\n\thisto, _ = np.histogram(dist, bins = bins2, density = False)\n\tnorm_hist = histo.astype(np.float32) / histo.sum()\n\t\n\t# HAPPS\n\thapps = np.hstack((appfd, norm_hist))\n\thapps = happs.astype(np.float32) / happs.sum()\n\td = len(happs)\n\tgc.collect()\n\tplt.plot(happs, color = 'darkred', label = \"$Descr.$\")\n\tplt.xlabel('{}-Dim. Descr - File: {}'.format(d, basename))\n\tplt.ylabel('Frequency of Descriptor Data (PDF)')\n\tplt.title(\"HAPPS (APPFD+HoDD)-{}vs{} Bins\".format(nBins, bins2))\n\tplt.legend()\n\tplt.savefig(outputdir + '{}_happs'.format(basename) + str(N) + 'pts.pdf')\n\tplt.close()\n\n\treturn happs\n\t\n# ------------------------------------------------------------------------------------------------- #\n# Compute, save and return HAPPS for a single 3D mesh.\nstartTime = time.time()\n\nhapps_descr = compute_HAPPS(Ps, Ns, filename, outputdir, N, r, vs, nBins)\n\nstopTime = time.time()\nduration = stopTime - startTime\n# ------------------------------------------------------------------------------------------------- #\n\nprint('HAPPS successfully computed!!!\\n Descriptor Dimension:\\t', len(happs_descr))\nprint(\"Total Computation Time for {}:\\t\".format(filename), str(duration) + 'secs.')"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.einsum",
"numpy.core.umath_tests.inner1d",
"numpy.squeeze",
"matplotlib.pyplot.plot",
"numpy.arctan2",
"numpy.mean",
"numpy.cross",
"numpy.histogram",
"numpy.hstack",
"matplotlib.pyplot.close",
"numpy.column_stack",
"numpy.nonzero",
"numpy.isnan",
"sklearn.preprocessing.minmax_scale",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.set_printoptions",
"numpy.vstack"
]
] |
AbbyGi/tiled
|
[
"a7f1809f0d8448fce3540c15d7697cb9454fedca"
] |
[
"tiled/adapters/hdf5.py"
] |
[
"import collections.abc\nimport warnings\n\nimport dask.array\nimport h5py\nimport numpy\n\nfrom ..adapters.utils import IndexersMixin, tree_repr\nfrom ..utils import DictView\nfrom .array import ArrayAdapter\n\n\nclass HDF5DatasetAdapter(ArrayAdapter):\n # TODO Just wrap h5py.Dataset directly, not via dask.array.\n def __init__(self, dataset):\n super().__init__(dask.array.from_array(dataset), metadata=dataset.attrs)\n\n\nclass HDF5Adapter(collections.abc.Mapping, IndexersMixin):\n \"\"\"\n Read an HDF5 file or a group within one.\n\n This map the structure of an HDF5 file onto a \"Tree\" of array structures.\n\n Examples\n --------\n\n From the root node of a file given a filepath\n\n >>> import h5py\n >>> HDF5Adapter.from_file(\"path/to/file.h5\")\n\n From the root node of a file given an h5py.File object\n\n >>> import h5py\n >>> file = h5py.File(\"path/to/file.h5\")\n >>> HDF5Adapter.from_file(file)\n\n From a group within a file\n\n >>> import h5py\n >>> file = h5py.File(\"path/to/file.h5\")\n >>> HDF5Adapter(file[\"some_group'][\"some_sub_group\"])\n\n \"\"\"\n\n def __init__(self, node, access_policy=None, authenticated_identity=None):\n self._node = node\n if (access_policy is not None) and (\n not access_policy.check_compatibility(self)\n ):\n raise ValueError(\n f\"Access policy {access_policy} is not compatible with this Tree.\"\n )\n self._access_policy = access_policy\n self._authenticated_identity = authenticated_identity\n super().__init__()\n\n @classmethod\n def from_file(cls, file):\n if not isinstance(file, h5py.File):\n file = h5py.File(file, \"r\")\n return cls(file)\n\n def __repr__(self):\n return tree_repr(self, list(self))\n\n @property\n def access_policy(self):\n return self._access_policy\n\n @property\n def authenticated_identity(self):\n return self._authenticated_identity\n\n def authenticated_as(self, identity):\n if self._authenticated_identity is not None:\n raise RuntimeError(\n f\"Already authenticated as {self.authenticated_identity}\"\n )\n if self._access_policy is not None:\n raise NotImplementedError\n else:\n tree = type(self)(\n self._node,\n access_policy=self._access_policy,\n authenticated_identity=identity,\n )\n return tree\n\n @property\n def metadata(self):\n d = dict(self._node.attrs)\n for k, v in list(d.items()):\n # Convert any bytes to str.\n if isinstance(v, bytes):\n d[k] = v.decode()\n return DictView(d)\n\n def __iter__(self):\n yield from self._node\n\n def __getitem__(self, key):\n value = self._node[key]\n if isinstance(value, h5py.Group):\n return HDF5Adapter(value)\n else:\n if value.dtype == numpy.dtype(\"O\"):\n warnings.warn(\n f\"The dataset {key} is of object type, using a \"\n \"Python-only feature of h5py that is not supported by \"\n \"HDF5 in general. Read more about that feature at \"\n \"https://docs.h5py.org/en/stable/special.html. \"\n \"Consider using a fixed-length field instead. 
\"\n \"Tiled will serve an empty placeholder.\"\n )\n return HDF5DatasetAdapter(numpy.array([]))\n return HDF5DatasetAdapter(value)\n\n def __len__(self):\n return len(self._node)\n\n def search(self, query):\n \"\"\"\n Return a Tree with a subset of the mapping.\n \"\"\"\n raise NotImplementedError\n\n # The following three methods are used by IndexersMixin\n # to define keys_indexer, items_indexer, and values_indexer.\n\n def _keys_slice(self, start, stop, direction):\n keys = list(self._node)\n if direction < 0:\n keys = reversed(keys)\n return keys[start:stop]\n\n def _items_slice(self, start, stop, direction):\n items = [(key, self[key]) for key in list(self)]\n if direction < 0:\n items = reversed(items)\n return items[start:stop]\n\n def _item_by_index(self, index, direction):\n keys = list(self)\n if direction < 0:\n keys = reversed(keys)\n return keys[index]\n"
] |
[
[
"numpy.array",
"numpy.dtype"
]
] |
ZhengyiLuo/humor
|
[
"72c1d507c025de2d214c2d7def815f6da225befa"
] |
[
"humor/utils/torch.py"
] |
[
"\nimport sys, os, time\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\ndef get_device(gpu_idx=0):\n '''\n Returns the pytorch device for the given gpu index.\n '''\n gpu_device_str = 'cuda:%d' % (gpu_idx)\n device_str = gpu_device_str if torch.cuda.is_available() else 'cpu'\n if device_str == gpu_device_str:\n print('Using detected GPU...')\n device_str = 'cuda:0'\n else:\n print('No detected GPU...using CPU.')\n device = torch.device(device_str)\n return device\n\ndef torch_to_numpy(tensor_list):\n return [x.to('cpu').data.numpy() for x in tensor_list]\n\ndef torch_to_scalar(tensor_list):\n return [x.to('cpu').item() for x in tensor_list]\n\ncopy2cpu = lambda tensor: tensor.detach().cpu().numpy()\n\ndef save_state(file_out, model, optimizer, cur_epoch=0, min_val_loss=float('Inf'), min_train_loss=float('Inf'), ignore_keys=None):\n model_state_dict = model.state_dict()\n if ignore_keys is not None:\n model_state_dict = {k: v for k, v in model_state_dict.items() if k.split('.')[0] not in ignore_keys}\n\n full_checkpoint_dict = {\n 'model' : model_state_dict,\n 'optim' : optimizer.state_dict(),\n 'epoch' : cur_epoch,\n 'min_val_loss' : min_val_loss,\n 'min_train_loss' : min_train_loss\n }\n torch.save(full_checkpoint_dict, file_out)\n\ndef load_state(load_path, model, optimizer=None, is_parallel=False, map_location=None, ignore_keys=None):\n if not os.path.exists(load_path):\n print('Could not find checkpoint at path ' + load_path)\n\n full_checkpoint_dict = torch.load(load_path, map_location=map_location)\n model_state_dict = full_checkpoint_dict['model']\n optim_state_dict = full_checkpoint_dict['optim']\n\n # load model weights\n for k, v in model_state_dict.items():\n if k.split('.')[0] == 'module' and not is_parallel:\n # then it was trained with Data parallel\n print('Loading weights trained with DataParallel...')\n model_state_dict = {'.'.join(k.split('.')[1:]) : v for k, v in model_state_dict.items() if k.split('.')[0] == 'module'}\n break\n \n if ignore_keys is not None:\n model_state_dict = {k: v for k, v in model_state_dict.items() if k.split('.')[0] not in ignore_keys}\n \n # overwrite entries in the existing state dict\n missing_keys, unexpected_keys = model.load_state_dict(model_state_dict, strict=False)\n if ignore_keys is not None:\n missing_keys = [k for k in missing_keys if k.split('.')[0] not in ignore_keys]\n unexpected_keys = [k for k in unexpected_keys if k.split('.')[0] not in ignore_keys]\n if len(missing_keys) > 0:\n print('WARNING: The following keys could not be found in the given state dict - ignoring...')\n print(missing_keys)\n if len(unexpected_keys) > 0:\n print('WARNING: The following keys were found in the given state dict but not in the current model - ignoring...')\n print(unexpected_keys)\n\n # load optimizer weights\n if optimizer is not None:\n optimizer.load_state_dict(optim_state_dict)\n\n min_train_loss = float('Inf')\n if 'min_train_loss' in full_checkpoint_dict.keys():\n min_train_loss = full_checkpoint_dict['min_train_loss']\n\n return full_checkpoint_dict['epoch'], full_checkpoint_dict['min_val_loss'], min_train_loss\n"
] |
[
[
"torch.device",
"torch.load",
"torch.cuda.is_available",
"torch.save"
]
] |
ronicamja/imageFilters
|
[
"d46f4c33b60ddf787fc03d0a1fa60a3d8fa0259a"
] |
[
"imageIO.py"
] |
[
"import imageio\nimport numpy as np\nimport os, log\ndef readImage(filename):\n if os.path.exists(filename):\n log.status(\"reading from \" + filename)\n first = imageio.imread(filename)\n first.tofile('.temp.raw')\n imageMemmap = np.memmap('.temp.raw', dtype=np.uint8, shape=first.shape)\n log.status(\"successfully read from\" + filename)\n try:\n os.remove(\".temp.raw\")\n finally:\n return imageMemmap\n \n else:\n log.error(\"can't read from \" + filename)\n return None\n\ndef writeImage(imageMemmap,filename):\n log.status(\"writing image to \"+filename)\n imageio.imwrite(filename, imageMemmap)\n log.status(\"image successfully write to \" + filename)"
] |
[
[
"numpy.memmap"
]
] |
ozendelait/pytorch-semseg
|
[
"200491febd653bd26befcd5b3d52c614aa832b7e"
] |
[
"ptsemseg/models/utils.py"
] |
[
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\n\nclass conv2DBatchNorm(nn.Module):\n def __init__(\n self,\n in_channels,\n n_filters,\n k_size,\n stride,\n padding,\n bias=True,\n dilation=1,\n is_batchnorm=True,\n ):\n super(conv2DBatchNorm, self).__init__()\n\n conv_mod = nn.Conv2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n dilation=dilation,\n )\n\n if is_batchnorm:\n self.cb_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters)))\n else:\n self.cb_unit = nn.Sequential(conv_mod)\n\n def forward(self, inputs):\n outputs = self.cb_unit(inputs)\n return outputs\n\n\nclass conv2DGroupNorm(nn.Module):\n def __init__(\n self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16\n ):\n super(conv2DGroupNorm, self).__init__()\n\n conv_mod = nn.Conv2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n dilation=dilation,\n )\n\n self.cg_unit = nn.Sequential(conv_mod, nn.GroupNorm(n_groups, int(n_filters)))\n\n def forward(self, inputs):\n outputs = self.cg_unit(inputs)\n return outputs\n\n\nclass deconv2DBatchNorm(nn.Module):\n def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):\n super(deconv2DBatchNorm, self).__init__()\n\n self.dcb_unit = nn.Sequential(\n nn.ConvTranspose2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n ),\n nn.BatchNorm2d(int(n_filters)),\n )\n\n def forward(self, inputs):\n outputs = self.dcb_unit(inputs)\n return outputs\n\n\nclass conv2DBatchNormRelu(nn.Module):\n def __init__(\n self,\n in_channels,\n n_filters,\n k_size,\n stride,\n padding,\n bias=True,\n dilation=1,\n is_batchnorm=True,\n ):\n super(conv2DBatchNormRelu, self).__init__()\n\n conv_mod = nn.Conv2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n dilation=dilation,\n )\n\n if is_batchnorm:\n self.cbr_unit = nn.Sequential(\n conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True)\n )\n else:\n self.cbr_unit = nn.Sequential(conv_mod, nn.ReLU(inplace=True))\n\n def forward(self, inputs):\n outputs = self.cbr_unit(inputs)\n return outputs\n\n\nclass conv2DGroupNormRelu(nn.Module):\n def __init__(\n self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16\n ):\n super(conv2DGroupNormRelu, self).__init__()\n\n conv_mod = nn.Conv2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n dilation=dilation,\n )\n\n self.cgr_unit = nn.Sequential(\n conv_mod, nn.GroupNorm(n_groups, int(n_filters)), nn.ReLU(inplace=True)\n )\n\n def forward(self, inputs):\n outputs = self.cgr_unit(inputs)\n return outputs\n\n\nclass deconv2DBatchNormRelu(nn.Module):\n def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):\n super(deconv2DBatchNormRelu, self).__init__()\n\n self.dcbr_unit = nn.Sequential(\n nn.ConvTranspose2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n ),\n nn.BatchNorm2d(int(n_filters)),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, inputs):\n outputs = self.dcbr_unit(inputs)\n return outputs\n\n\nclass unetConv2(nn.Module):\n def __init__(self, in_size, out_size, is_batchnorm):\n super(unetConv2, self).__init__()\n\n if 
is_batchnorm:\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_size, out_size, 3, 1, 0), nn.BatchNorm2d(out_size), nn.ReLU()\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(out_size, out_size, 3, 1, 0), nn.BatchNorm2d(out_size), nn.ReLU()\n )\n else:\n self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 0), nn.ReLU())\n self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 0), nn.ReLU())\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n return outputs\n\n\nclass unetUp(nn.Module):\n def __init__(self, in_size, out_size, is_deconv):\n super(unetUp, self).__init__()\n self.conv = unetConv2(in_size, out_size, False)\n if is_deconv:\n self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)\n else:\n self.up = nn.UpsamplingBilinear2d(scale_factor=2)\n\n def forward(self, inputs1, inputs2):\n outputs2 = self.up(inputs2)\n offset = outputs2.size()[2] - inputs1.size()[2]\n padding = 2 * [offset // 2, offset // 2]\n outputs1 = F.pad(inputs1, padding)\n return self.conv(torch.cat([outputs1, outputs2], 1))\n\n\nclass segnetDown2(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown2, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\n\nclass segnetDown3(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown3, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\n\nclass segnetUp2(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetUp2, self).__init__()\n self.unpool = nn.MaxUnpool2d(2, 2)\n self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n\n def forward(self, inputs, indices, output_shape):\n outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)\n outputs = self.conv1(outputs)\n outputs = self.conv2(outputs)\n return outputs\n\n\nclass segnetUp3(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetUp3, self).__init__()\n self.unpool = nn.MaxUnpool2d(2, 2)\n self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n\n def forward(self, inputs, indices, output_shape):\n outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)\n outputs = self.conv1(outputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n return outputs\n\n\nclass residualBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_channels, n_filters, stride=1, downsample=None):\n super(residualBlock, self).__init__()\n\n self.convbnrelu1 = conv2DBatchNormRelu(in_channels, 
n_filters, 3, stride, 1, bias=False)\n self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False)\n self.downsample = downsample\n self.stride = stride\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n\n out = self.convbnrelu1(x)\n out = self.convbn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nclass residualBottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_channels, n_filters, stride=1, downsample=None):\n super(residualBottleneck, self).__init__()\n self.convbn1 = nn.Conv2DBatchNorm(in_channels, n_filters, k_size=1, bias=False)\n self.convbn2 = nn.Conv2DBatchNorm(\n n_filters, n_filters, k_size=3, padding=1, stride=stride, bias=False\n )\n self.convbn3 = nn.Conv2DBatchNorm(n_filters, n_filters * 4, k_size=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.convbn1(x)\n out = self.convbn2(out)\n out = self.convbn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass linknetUp(nn.Module):\n def __init__(self, in_channels, n_filters):\n super(linknetUp, self).__init__()\n\n # B, 2C, H, W -> B, C/2, H, W\n self.convbnrelu1 = conv2DBatchNormRelu(\n in_channels, n_filters / 2, k_size=1, stride=1, padding=1\n )\n\n # B, C/2, H, W -> B, C/2, H, W\n self.deconvbnrelu2 = nn.deconv2DBatchNormRelu(\n n_filters / 2, n_filters / 2, k_size=3, stride=2, padding=0\n )\n\n # B, C/2, H, W -> B, C, H, W\n self.convbnrelu3 = conv2DBatchNormRelu(\n n_filters / 2, n_filters, k_size=1, stride=1, padding=1\n )\n\n def forward(self, x):\n x = self.convbnrelu1(x)\n x = self.deconvbnrelu2(x)\n x = self.convbnrelu3(x)\n return x\n\n\nclass FRRU(nn.Module):\n \"\"\"\n Full Resolution Residual Unit for FRRN\n \"\"\"\n\n def __init__(self, prev_channels, out_channels, scale, group_norm=False, n_groups=None):\n super(FRRU, self).__init__()\n self.scale = scale\n self.prev_channels = prev_channels\n self.out_channels = out_channels\n self.group_norm = group_norm\n self.n_groups = n_groups\n\n if self.group_norm:\n conv_unit = conv2DGroupNormRelu\n self.conv1 = conv_unit(\n prev_channels + 32,\n out_channels,\n k_size=3,\n stride=1,\n padding=1,\n bias=False,\n n_groups=self.n_groups,\n )\n self.conv2 = conv_unit(\n out_channels,\n out_channels,\n k_size=3,\n stride=1,\n padding=1,\n bias=False,\n n_groups=self.n_groups,\n )\n\n else:\n conv_unit = conv2DBatchNormRelu\n self.conv1 = conv_unit(\n prev_channels + 32, out_channels, k_size=3, stride=1, padding=1, bias=False\n )\n self.conv2 = conv_unit(\n out_channels, out_channels, k_size=3, stride=1, padding=1, bias=False\n )\n\n self.conv_res = nn.Conv2d(out_channels, 32, kernel_size=1, stride=1, padding=0)\n\n def forward(self, y, z):\n # print(\"SIZES0: \", y.shape, z.shape)\n x = torch.cat([y, nn.MaxPool2d(self.scale, self.scale)(z)], dim=1)\n y_prime = self.conv1(x)\n #print(\"SIZES1: \", x.shape, y_prime.shape)\n y_prime = self.conv2(y_prime)\n x = self.conv_res(y_prime)\n #print(\"SIZES2: \", x.shape, y_prime.shape)\n upsample_size = [_s * self.scale for _s in y_prime.shape[-2:]]\n x = F.interpolate(x, size=upsample_size, mode=\"nearest\", align_corners=None)\n #print(\"SIZES3: \", x.shape, z.shape, upsample_size, self.scale)\n #z_prime = torch.narrow(torch.narrow(z, 2, 0, x.size()[2]), 3, 0, x.size()[3]) + x\n z_prime = z + 
x\n #print(\"SIZES1: \", z_prime.shape)\n return y_prime, z_prime\n\n\nclass RU(nn.Module):\n \"\"\"\n Residual Unit for FRRN\n \"\"\"\n\n def __init__(self, channels, kernel_size=3, strides=1, group_norm=False, n_groups=None):\n super(RU, self).__init__()\n self.group_norm = group_norm\n self.n_groups = n_groups\n\n if self.group_norm:\n self.conv1 = conv2DGroupNormRelu(\n channels,\n channels,\n k_size=kernel_size,\n stride=strides,\n padding=1,\n bias=False,\n n_groups=self.n_groups,\n )\n self.conv2 = conv2DGroupNorm(\n channels,\n channels,\n k_size=kernel_size,\n stride=strides,\n padding=1,\n bias=False,\n n_groups=self.n_groups,\n )\n\n else:\n self.conv1 = conv2DBatchNormRelu(\n channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False\n )\n self.conv2 = conv2DBatchNorm(\n channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False\n )\n\n def forward(self, x):\n incoming = x\n x = self.conv1(x)\n x = self.conv2(x)\n return x + incoming\n\n\nclass residualConvUnit(nn.Module):\n def __init__(self, channels, kernel_size=3):\n super(residualConvUnit, self).__init__()\n\n self.residual_conv_unit = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(channels, channels, kernel_size=kernel_size),\n nn.ReLU(inplace=True),\n nn.Conv2d(channels, channels, kernel_size=kernel_size),\n )\n\n def forward(self, x):\n input = x\n x = self.residual_conv_unit(x)\n return x + input\n\n\nclass multiResolutionFusion(nn.Module):\n def __init__(self, channels, up_scale_high, up_scale_low, high_shape, low_shape):\n super(multiResolutionFusion, self).__init__()\n\n self.up_scale_high = up_scale_high\n self.up_scale_low = up_scale_low\n\n self.conv_high = nn.Conv2d(high_shape[1], channels, kernel_size=3)\n\n if low_shape is not None:\n self.conv_low = nn.Conv2d(low_shape[1], channels, kernel_size=3)\n\n def forward(self, x_high, x_low):\n high_upsampled = F.upsample(\n self.conv_high(x_high), scale_factor=self.up_scale_high, mode=\"bilinear\", align_corners=False \n )\n\n if x_low is None:\n return high_upsampled\n\n low_upsampled = F.upsample(\n self.conv_low(x_low), scale_factor=self.up_scale_low, mode=\"bilinear\", align_corners=False\n )\n\n return low_upsampled + high_upsampled\n\n\nclass chainedResidualPooling(nn.Module):\n def __init__(self, channels, input_shape):\n super(chainedResidualPooling, self).__init__()\n\n self.chained_residual_pooling = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.MaxPool2d(5, 1, 2),\n nn.Conv2d(input_shape[1], channels, kernel_size=3),\n )\n\n def forward(self, x):\n input = x\n x = self.chained_residual_pooling(x)\n return x + input\n\n\nclass pyramidPooling(nn.Module):\n def __init__(\n self, in_channels, pool_sizes, model_name=\"pspnet\", fusion_mode=\"cat\", is_batchnorm=True\n ):\n super(pyramidPooling, self).__init__()\n\n bias = not is_batchnorm\n\n self.paths = []\n for i in range(len(pool_sizes)):\n self.paths.append(\n conv2DBatchNormRelu(\n in_channels,\n int(in_channels / len(pool_sizes)),\n 1,\n 1,\n 0,\n bias=bias,\n is_batchnorm=is_batchnorm,\n )\n )\n\n self.path_module_list = nn.ModuleList(self.paths)\n self.pool_sizes = pool_sizes\n self.model_name = model_name\n self.fusion_mode = fusion_mode\n\n def forward(self, x):\n h, w = x.shape[2:]\n\n \n k_sizes = []\n strides = []\n #FullHD: [(5, 10), (11, 20), (17, 30), (34, 60)] [(5, 10), (11, 20), (17, 30), (34, 60)]\n #self.training or\n # replicate strange k_size/stride offsets found in pretrained icnet networks\n # general settings or pspnet\n is_icnet = 
len(self.model_name) >= 5 and self.model_name[:5] == \"icnet\" and x.shape[2] == 33 and x.shape[3] == 65\n if is_icnet:\n add_icnet_str = [(3,5),(3,5),(1,1)]\n for idx, pool_size in enumerate(self.pool_sizes):\n ksz = (min(int(h / pool_size)-1,x.shape[2]), min(int(w / pool_size)-1,x.shape[3]))\n strides.append(ksz)\n if idx < len(add_icnet_str):\n k_sizes.append((ksz[0]+add_icnet_str[idx][0],ksz[1]+add_icnet_str[idx][1]))\n else:\n k_sizes.append(ksz)\n else:\n for pool_size in self.pool_sizes:\n ksz = (min(int(h / pool_size),x.shape[2]), min(int(w / pool_size),x.shape[3]))\n k_sizes.append(ksz)\n strides.append(ksz)\n #print(\"EVAL!! sz, h, w\", self.pool_sizes, h, w, k_sizes, strides)\n #else: # eval mode and icnet: pre-trained for 1025 x 2049\n # k_sizes = [(8, 15), (13, 25), (17, min(33, x.shape[2])), (33, min(65, x.shape[3]))]\n # strides = [(5, 10), (10, 20), (16, min(32, x.shape[2])), (33, min(65, x.shape[3]))]\n\n if self.fusion_mode == \"cat\": # pspnet: concat (including x)\n output_slices = [x]\n\n for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)):\n out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0)\n # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size))\n if self.model_name != \"icnet\":\n out = module(out)\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n output_slices.append(out)\n\n return torch.cat(output_slices, dim=1)\n else: # icnet: element-wise sum (including x)\n pp_sum = x\n\n for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)):\n #print(\"INPUT_SZ \" + str(i) + \" \" + str(x.shape)+ \" k \"+ str(k_sizes[i])+ \" s \" + str(strides[i]))\n out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0)\n #print(\"-> \" + str(out.shape))\n # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size))\n if self.model_name != \"icnet\":\n out = module(out)\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n pp_sum = pp_sum + out\n\n return pp_sum\n\n\nclass bottleNeckPSP(nn.Module):\n def __init__(\n self, in_channels, mid_channels, out_channels, stride, dilation=1, is_batchnorm=True\n ):\n super(bottleNeckPSP, self).__init__()\n\n bias = not is_batchnorm\n\n self.cbr1 = conv2DBatchNormRelu(\n in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm\n )\n if dilation > 1:\n self.cbr2 = conv2DBatchNormRelu(\n mid_channels,\n mid_channels,\n 3,\n stride=stride,\n padding=dilation,\n bias=bias,\n dilation=dilation,\n is_batchnorm=is_batchnorm,\n )\n else:\n self.cbr2 = conv2DBatchNormRelu(\n mid_channels,\n mid_channels,\n 3,\n stride=stride,\n padding=1,\n bias=bias,\n dilation=1,\n is_batchnorm=is_batchnorm,\n )\n self.cb3 = conv2DBatchNorm(\n mid_channels, out_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm\n )\n self.cb4 = conv2DBatchNorm(\n in_channels,\n out_channels,\n 1,\n stride=stride,\n padding=0,\n bias=bias,\n is_batchnorm=is_batchnorm,\n )\n\n def forward(self, x):\n conv = self.cb3(self.cbr2(self.cbr1(x)))\n residual = self.cb4(x)\n return F.relu(conv + residual, inplace=True)\n\n\nclass bottleNeckIdentifyPSP(nn.Module):\n def __init__(self, in_channels, mid_channels, stride, dilation=1, is_batchnorm=True):\n super(bottleNeckIdentifyPSP, self).__init__()\n\n bias = not is_batchnorm\n\n self.cbr1 = conv2DBatchNormRelu(\n in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm\n )\n if dilation > 1:\n self.cbr2 = 
conv2DBatchNormRelu(\n mid_channels,\n mid_channels,\n 3,\n stride=1,\n padding=dilation,\n bias=bias,\n dilation=dilation,\n is_batchnorm=is_batchnorm,\n )\n else:\n self.cbr2 = conv2DBatchNormRelu(\n mid_channels,\n mid_channels,\n 3,\n stride=1,\n padding=1,\n bias=bias,\n dilation=1,\n is_batchnorm=is_batchnorm,\n )\n self.cb3 = conv2DBatchNorm(\n mid_channels, in_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm\n )\n\n def forward(self, x):\n residual = x\n x = self.cb3(self.cbr2(self.cbr1(x)))\n return F.relu(x + residual, inplace=True)\n\n\nclass residualBlockPSP(nn.Module):\n def __init__(\n self,\n n_blocks,\n in_channels,\n mid_channels,\n out_channels,\n stride,\n dilation=1,\n include_range=\"all\",\n is_batchnorm=True,\n ):\n super(residualBlockPSP, self).__init__()\n\n if dilation > 1:\n stride = 1\n\n # residualBlockPSP = convBlockPSP + identityBlockPSPs\n layers = []\n if include_range in [\"all\", \"conv\"]:\n layers.append(\n bottleNeckPSP(\n in_channels,\n mid_channels,\n out_channels,\n stride,\n dilation,\n is_batchnorm=is_batchnorm,\n )\n )\n if include_range in [\"all\", \"identity\"]:\n for i in range(n_blocks - 1):\n layers.append(\n bottleNeckIdentifyPSP(\n out_channels, mid_channels, stride, dilation, is_batchnorm=is_batchnorm\n )\n )\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\n\nclass cascadeFeatureFusion(nn.Module):\n def __init__(\n self, n_classes, low_in_channels, high_in_channels, out_channels, is_batchnorm=True\n ):\n super(cascadeFeatureFusion, self).__init__()\n\n bias = not is_batchnorm\n\n self.low_dilated_conv_bn = conv2DBatchNorm(\n low_in_channels,\n out_channels,\n 3,\n stride=1,\n padding=2,\n bias=bias,\n dilation=2,\n is_batchnorm=is_batchnorm,\n )\n self.low_classifier_conv = nn.Conv2d(\n int(low_in_channels),\n int(n_classes),\n kernel_size=1,\n padding=0,\n stride=1,\n bias=True,\n dilation=1,\n ) # Train only\n self.high_proj_conv_bn = conv2DBatchNorm(\n high_in_channels,\n out_channels,\n 1,\n stride=1,\n padding=0,\n bias=bias,\n is_batchnorm=is_batchnorm,\n )\n\n def forward(self, x_low, x_high):\n high_fm = self.high_proj_conv_bn(x_high)\n x_low_upsampled = F.interpolate(\n x_low, size=(high_fm.shape[2],high_fm.shape[3]), mode=\"bilinear\", align_corners=True\n )\n\n low_cls = self.low_classifier_conv(x_low_upsampled)\n low_fm = self.low_dilated_conv_bn(x_low_upsampled)\n \n high_fused_fm = F.relu(low_fm + high_fm, inplace=True)\n\n return high_fused_fm, low_cls\n\n\ndef get_interp_size(input, s_factor=1, z_factor=1): # for caffe\n ori_h, ori_w = input.shape[2:]\n\n # shrink (s_factor >= 1)\n ori_h = (ori_h - 1) / s_factor + 1\n ori_w = (ori_w - 1) / s_factor + 1\n\n # zoom (z_factor >= 1)\n ori_h = ori_h + (ori_h - 1) * (z_factor - 1)\n ori_w = ori_w + (ori_w - 1) * (z_factor - 1)\n\n resize_shape = (int(ori_h), int(ori_w))\n return resize_shape\n\n\ndef interp(input, output_size, mode=\"bilinear\"):\n n, c, ih, iw = input.shape\n oh, ow = output_size\n\n # normalize to [-1, 1]\n h = torch.arange(0, oh, dtype=torch.float, device=input.device) / (oh - 1) * 2 - 1\n w = torch.arange(0, ow, dtype=torch.float, device=input.device) / (ow - 1) * 2 - 1\n\n grid = torch.zeros(oh, ow, 2, dtype=torch.float, device=input.device)\n grid[:, :, 0] = w.unsqueeze(0).repeat(oh, 1)\n grid[:, :, 1] = h.unsqueeze(0).repeat(ow, 1).transpose(0, 1)\n grid = grid.unsqueeze(0).repeat(n, 1, 1, 1) # grid.shape: [n, oh, ow, 2]\n grid = Variable(grid)\n if input.is_cuda:\n grid = grid.cuda()\n\n 
return F.grid_sample(input, grid, mode=mode)\n\n\ndef get_upsampling_weight(in_channels, out_channels, kernel_size):\n \"\"\"Make a 2D bilinear kernel suitable for upsampling\"\"\"\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n return torch.from_numpy(weight).float()\n"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.nn.functional.interpolate",
"torch.autograd.Variable",
"torch.from_numpy",
"torch.nn.functional.relu",
"torch.nn.Conv2DBatchNorm",
"torch.arange",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.avg_pool2d",
"torch.nn.UpsamplingBilinear2d",
"torch.nn.deconv2DBatchNormRelu",
"torch.nn.BatchNorm2d",
"torch.nn.MaxUnpool2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.grid_sample",
"torch.nn.ReLU"
]
] |
MyNameJeremy/Sorting
|
[
"eec69ae01994704501e5228434473cc643b60a3d"
] |
[
"Quicksort.py"
] |
[
"'''\r\nWie benutzt man das Programm?\r\nGanz einfach: Man gibt ein, wie lang das array sein soll\r\n(Ich empfehle eine hohe Zahl, damit sich Quicksort überhaupt lohnt.).\r\nDann drückt man Enter und bekommt die zufälligen Zahlen, die gewählt wurden als ein Diagramm angezeigt.\r\nDas Diagramm schließt man dann wieder und drückt bei der Aufforderung Enter zu drücken Enter.\r\nDann bekommt man noch ein Diagramm angezeigt. Dieses Mal mit den sortierten Werten.\r\n'''\r\n\r\n\r\nimport warnings\r\nwarnings.simplefilter(\"ignore\")\r\nfrom random import *\r\nfrom matplotlib import pyplot as plt \r\n\r\ndef quicksort(array):\r\n if len(array) <= 1:\r\n return(array)\r\n else:\r\n pivot = array.pop()\r\n\r\n larger = []\r\n smaller = []\r\n\r\n for i in array:\r\n if i > pivot:\r\n larger.append(i)\r\n\r\n else:\r\n smaller.append(i)\r\n\r\n return quicksort(smaller) + [pivot] + quicksort(larger)\r\n\r\n\r\nlength = int(input(\"Wie lang soll das array sein? \"))\r\narray = []\r\nfor i in range(length):\r\n array.append(randint(0, length))\r\nplt.plot(array)\r\nplt.show()\r\ninput(\"Drücke Enter zum sortieren\")\r\nplt.plot(quicksort(array))\r\nplt.show()\r\n\r\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] |
koshian2/SNGAN
|
[
"117fbb19ac79bbc561c3ccfe285d6890ea0971f9"
] |
[
"train_stl_resnet_postact.py"
] |
[
"import torch\nimport torchvision\nfrom torchvision import transforms\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport statistics\nimport glob\n\nimport losses\nimport models.post_act_resnet as post_act_resnet\nfrom inception_score import inceptions_score_all_weights\n\ndef load_stl(batch_size):\n # first, store as tensor\n trans = transforms.Compose([\n transforms.Resize(size=(48, 48)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n # train + test (# 13000)\n dataset = torchvision.datasets.STL10(root=\"./data\", split=\"train\", transform=trans, download=True)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=100, shuffle=False)\n imgs, labels = [], []\n for x, y in dataloader:\n imgs.append(x)\n labels.append(y)\n dataset = torchvision.datasets.STL10(root=\"./data\", split=\"test\", transform=trans)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=100, shuffle=False)\n for x, y in dataloader:\n imgs.append(x)\n labels.append(y)\n # as tensor\n all_imgs = torch.cat(imgs, dim=0)\n all_labels = torch.cat(labels, dim=0)\n # as dataset\n dataset = torch.utils.data.TensorDataset(all_imgs, all_labels)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)\n return dataloader\n\ndef train(cases):\n ## ResNet version stl-10 (post-act, D=small non-resnet)\n\n # case 0\n # n_dis = 5, beta2 = 0.9, non-conditional\n # case 1\n # n_dis = 5, beta2 = 0.9, conditional\n # case 2\n # n_dis = 1, beta2 = 0.9, non-conditional\n # case 3\n # n_dis = 1, beta2 = 0.9, conditional\n # case 4\n # n_dis = 1, beta2 = 0.999, non-conditional\n # case 5\n # n_dis = 1, beta2 = 0.999, conditional\n # case 6\n # n_dis = 1, beta2 = 0.999, non-conditional, leaky_relu_slope = 0.2 (others=0.1)\n # case 7\n # n_dis = 1, beta2 = 0.999, conditional, leaky_relu_slope = 0.2\n # case 8\n # n_dis = 1, beta2 = 0.999, non-conditional, leaky_relu_slope = 0.2, lr_d = 0.001 (others=0.0002)\n # case 9\n # n_dis = 1, beta2 = 0.999, conditional, leaky_relu_slope = 0.2, lr_d = 0.001\n\n output_dir = f\"stl_resnet_postact_case{cases}\"\n\n batch_size = 64\n device = \"cuda\"\n\n dataloader = load_stl(batch_size)\n\n n_classes = 10 if (cases % 2 != 0) else 0 # Conditional / non-Conditional\n n_dis_update = 5 if cases <= 1 else 1\n beta2 = 0.9 if cases <= 3 else 0.999\n lrelu_slope = 0.1 if cases <= 5 else 0.2\n lr_d = 0.0002 if cases <= 7 else 0.001\n\n n_epoch = 1301 if n_dis_update == 5 else 261\n \n model_G = post_act_resnet.Generator(latent_dims=3, n_classes_g=n_classes)\n model_D = post_act_resnet.Discriminator(latent_dims=3, n_classes=n_classes, lrelu_slope=lrelu_slope)\n model_G, model_D = model_G.to(device), model_D.to(device)\n\n param_G = torch.optim.Adam(model_G.parameters(), lr=0.0002, betas=(0.5, beta2))\n param_D = torch.optim.Adam(model_D.parameters(), lr=lr_d, betas=(0.5, beta2))\n\n gan_loss = losses.HingeLoss(batch_size, device)\n\n result = {\"d_loss\": [], \"g_loss\": []}\n n = len(dataloader)\n onehot_encoding = torch.eye(10).to(device)\n\n for epoch in range(n_epoch):\n log_loss_D, log_loss_G = [], []\n\n for i, (real_img, labels) in tqdm(enumerate(dataloader), total=n):\n batch_len = len(real_img)\n if batch_len != batch_size: continue\n\n real_img = real_img.to(device)\n if n_classes != 0:\n label_onehots = onehot_encoding[labels.to(device)] # conditional\n else:\n label_onehots = None # non conditional\n \n # train G\n if i % n_dis_update == 0:\n param_G.zero_grad()\n param_D.zero_grad()\n\n 
rand_X = torch.randn(batch_len, 128).to(device)\n fake_img = model_G(rand_X, label_onehots)\n fake_img_tensor = fake_img.detach()\n fake_img_onehots = label_onehots.detach() if label_onehots is not None else None\n g_out = model_D(fake_img, label_onehots)\n loss = gan_loss(g_out, \"gen\")\n log_loss_G.append(loss.item())\n # backprop\n loss.backward()\n param_G.step()\n\n # train D\n param_G.zero_grad()\n param_D.zero_grad()\n # train real\n d_out_real = model_D(real_img, label_onehots)\n loss_real = gan_loss(d_out_real, \"dis_real\")\n # train fake\n d_out_fake = model_D(fake_img_tensor, fake_img_onehots)\n loss_fake = gan_loss(d_out_fake, \"dis_fake\")\n loss = loss_real + loss_fake\n log_loss_D.append(loss.item())\n # backprop\n loss.backward()\n param_D.step()\n\n # ログ\n result[\"d_loss\"].append(statistics.mean(log_loss_D))\n result[\"g_loss\"].append(statistics.mean(log_loss_G))\n print(f\"epoch = {epoch}, g_loss = {result['g_loss'][-1]}, d_loss = {result['d_loss'][-1]}\") \n \n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n if epoch % n_dis_update == 0:\n torchvision.utils.save_image(fake_img_tensor, f\"{output_dir}/epoch_{epoch:03}.png\",\n nrow=8, padding=2, normalize=True, range=(-1.0, 1.0))\n\n # 係数保存\n if not os.path.exists(output_dir + \"/models\"):\n os.mkdir(output_dir+\"/models\")\n if epoch % (5 * n_dis_update) == 0: \n torch.save(model_G.state_dict(), f\"{output_dir}/models/gen_epoch_{epoch:04}.pytorch\")\n torch.save(model_D.state_dict(), f\"{output_dir}/models/dis_epoch_{epoch:04}.pytorch\")\n\n # ログ\n with open(output_dir + \"/logs.pkl\", \"wb\") as fp:\n pickle.dump(result, fp)\n \ndef evaluate(cases):\n if cases % 2 == 0:\n enable_conditional = False\n n_classes = 0\n else:\n enable_conditional = True\n n_classes = 10 \n\n inceptions_score_all_weights(\"stl_resnet_postact_case\" + str(cases), post_act_resnet.Generator,\n 100, 100, n_classes=n_classes, latent_dims=3, n_classes_g=n_classes)\n \nif __name__ == \"__main__\":\n for i in range(10):\n train(i)\n evaluate(i)\n"
] |
[
[
"torch.cat",
"torch.randn",
"torch.utils.data.TensorDataset",
"torch.eye",
"torch.utils.data.DataLoader"
]
] |
akash5100/sunpy
|
[
"6f586392f9799383017e0566d4303928183c06be"
] |
[
"sunpy/io/tests/test_fits.py"
] |
[
"import mmap\nfrom pathlib import Path\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pytest\n\nimport astropy.io.fits as fits\n\nfrom sunpy.data.test import get_test_filepath, test_data_filenames\nfrom sunpy.data.test.waveunit import MEDN_IMAGE, MQ_IMAGE, NA_IMAGE, SVSM_IMAGE\nfrom sunpy.io import _fits\nfrom sunpy.util import MetaDict, SunpyMetadataWarning\n\nTEST_RHESSI_IMAGE = get_test_filepath('hsi_image_20101016_191218.fits')\nTEST_AIA_IMAGE = get_test_filepath('aia_171_level1.fits')\nTEST_EIT_HEADER = get_test_filepath('EIT_header/efz20040301.000010_s.header')\nTEST_SWAP_HEADER = get_test_filepath('SWAP/resampled1_swap.header')\n# Some of the tests images contain an invalid BLANK keyword\n# ignore the warning raised by this\npytestmark = pytest.mark.filterwarnings(\"ignore:Invalid 'BLANK' keyword in header\")\n\n\[email protected](\n 'fname, hdus, length',\n [(TEST_RHESSI_IMAGE, None, 4),\n (TEST_RHESSI_IMAGE, 1, 1),\n (TEST_RHESSI_IMAGE, [1, 2], 2),\n (TEST_RHESSI_IMAGE, range(0, 2), 2)]\n)\ndef test_read_hdus(fname, hdus, length):\n pairs = _fits.read(fname, hdus=hdus)\n assert len(pairs) == length\n\n\[email protected](\n 'fname, waveunit',\n [(TEST_RHESSI_IMAGE, None),\n (TEST_EIT_HEADER, None),\n (TEST_AIA_IMAGE, 'angstrom'),\n (MEDN_IMAGE, 'nm'),\n (MQ_IMAGE, 'angstrom'),\n (NA_IMAGE, 'm'),\n (TEST_SWAP_HEADER, 'angstrom'),\n (SVSM_IMAGE, 'nm')]\n)\ndef test_extract_waveunit(fname, waveunit):\n if Path(fname).suffix == '.header':\n header = _fits.format_comments_and_history(fits.Header.fromtextfile(fname))\n else:\n header = _fits.get_header(fname)[0]\n waveunit = _fits.extract_waveunit(header)\n assert waveunit is waveunit\n\n\ndef test_simple_write(tmpdir):\n data, header = _fits.read(TEST_AIA_IMAGE)[0]\n outfile = tmpdir / \"test.fits\"\n _fits.write(str(outfile), data, header)\n assert outfile.exists()\n\n\ndef test_extra_comment_write(tmpdir):\n data, header = _fits.read(TEST_AIA_IMAGE)[0]\n header[\"KEYCOMMENTS\"][\"TEST\"] = \"Hello world\"\n outfile = tmpdir / \"test.fits\"\n _fits.write(str(outfile), data, header)\n assert outfile.exists()\n\n\ndef test_simple_write_compressed(tmpdir):\n data, header = _fits.read(TEST_AIA_IMAGE)[0]\n outfile = tmpdir / \"test.fits\"\n _fits.write(str(outfile), data, header, hdu_type=fits.CompImageHDU)\n assert outfile.exists()\n with fits.open(str(outfile)) as hdul:\n assert len(hdul) == 2\n assert isinstance(hdul[1], fits.CompImageHDU)\n\n\ndef test_simple_write_compressed_difftypeinst(tmpdir):\n # `hdu_type=fits.CompImageHDU` and `hdu_type=fits.CompImageHDU()`\n # should produce identical FITS files\n data, header = _fits.read(TEST_AIA_IMAGE)[0]\n outfile_type = str(tmpdir / \"test_type.fits\")\n outfile_inst = str(tmpdir / \"test_inst.fits\")\n _fits.write(outfile_type, data, header, hdu_type=fits.CompImageHDU)\n _fits.write(outfile_inst, data, header, hdu_type=fits.CompImageHDU())\n assert fits.FITSDiff(outfile_type, outfile_inst, ignore_comments=['PCOUNT']).identical\n\n\[email protected](\n 'kwargs, should_fail',\n [({}, False),\n ({'quantize_level': -32}, True)]\n)\ndef test_simple_write_compressed_instance(tmpdir, kwargs, should_fail):\n data, header = _fits.read(TEST_AIA_IMAGE)[0]\n outfile = tmpdir / \"test.fits\"\n\n # Ensure HDU instance is used correctly\n hdu = fits.CompImageHDU(data=np.array([0.]), **kwargs)\n hdu.header['HELLO'] = 'world' # should be in the written file\n hdu.header['TELESCOP'] = 'other' # should be replaced with 'SDO/AIA'\n hdu.header['NAXIS'] = 5 # should be replaced with 2\n 
_fits.write(str(outfile), data, header, hdu_type=hdu)\n assert outfile.exists()\n with fits.open(str(outfile)) as hdul:\n assert len(hdul) == 2\n assert isinstance(hdul[1], fits.CompImageHDU)\n assert hdul[1].header['HELLO'] == 'world'\n assert hdul[1].header['TELESCOP'] == 'SDO/AIA'\n assert hdul[1].header['NAXIS'] == 2\n data_preserved = hdul[1].data == pytest.approx(data, abs=10)\n print(np.abs(hdul[1].data - data).max())\n print(kwargs)\n if should_fail: # high compression setting preserved\n assert not data_preserved\n else:\n assert data_preserved\n\n\ndef test_write_with_metadict_header_astropy(tmpdir):\n with fits.open(TEST_AIA_IMAGE) as fits_file:\n data, header = fits_file[0].data, fits_file[0].header\n meta_header = MetaDict(OrderedDict(header))\n temp_file = tmpdir / \"temp.fits\"\n with pytest.warns(SunpyMetadataWarning, match='The meta key comment is not valid ascii'):\n _fits.write(str(temp_file), data, meta_header)\n assert temp_file.exists()\n fits_file.close()\n\n# Various warnings are thrown in this test, but we just want to check that the code\n# works without exceptions\n\n\[email protected]('ignore')\ndef test_fitsheader():\n \"\"\"Test that all test data can be converted back to a FITS header.\"\"\"\n extensions = ('.fts', '.fits')\n for ext in extensions:\n test_files = [f for f in test_data_filenames() if f.suffix == ext]\n for ffile in test_files:\n fits_file = fits.open(ffile)\n fits_file.verify(\"fix\")\n meta_header = MetaDict(OrderedDict(fits_file[0].header))\n _fits.header_to_fits(meta_header)\n fits_file.close()\n\n\ndef test_warn_nonascii():\n # Check that a non-ascii character raises a warning and not an error\n with pytest.warns(SunpyMetadataWarning, match='not valid ascii'):\n fits = _fits.header_to_fits({'bad': 'test\\t',\n 'good': 'test'})\n assert 'GOOD' in fits.keys()\n assert 'BAD' not in fits.keys()\n\n\ndef test_warn_nan():\n # Check that a NaN value raises a warning and not an error\n with pytest.warns(SunpyMetadataWarning, match='has a NaN value'):\n fits = _fits.header_to_fits({'bad': float('nan'),\n 'good': 1.0})\n assert 'GOOD' in fits.keys()\n assert 'BAD' not in fits.keys()\n\n\ndef test_warn_longkey():\n # Check that a key that is too long raises a warning and not an error\n with pytest.warns(SunpyMetadataWarning, match='The meta key badlongkey is too long'):\n fits = _fits.header_to_fits({'badlongkey': 'test',\n 'goodkey': 'test'})\n assert 'GOODKEY' in fits.keys()\n assert 'BADLONGKEY' not in fits.keys()\n\n\ndef test_read_memmap():\n data, _ = _fits.read(TEST_AIA_IMAGE, memmap=True)[0]\n assert data.base is not None\n assert isinstance(data.base, mmap.mmap)\n\n data, _ = _fits.read(TEST_AIA_IMAGE, memmap=False)[0]\n assert data.base is None\n"
] |
[
[
"numpy.array",
"numpy.abs"
]
] |
jetsonworld/TensorFlow_On_JetsonNano
|
[
"216b21f63cb0a33e1a2a4d39be8d250c5ce1b504"
] |
[
"06_Gradient_Boosting/09_xgboost_for_Classification.py"
] |
[
"# xgboost for classification\nfrom numpy import asarray\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.datasets import make_classification\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom matplotlib import pyplot\n# define dataset\nX, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)\n# evaluate the model\nmodel = XGBClassifier()\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\nn_scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')\nprint('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))\n# fit the model on the whole dataset\nmodel = XGBClassifier()\nmodel.fit(X, y)\n# make a single prediction\nrow = [2.56999479, -0.13019997, 3.16075093, -4.35936352, -1.61271951, -1.39352057, -2.48924933, -1.93094078, 3.26130366, 2.05692145]\nrow = asarray(row).reshape((1, len(row)))\nyhat = model.predict(row)\nprint('Prediction: %d' % yhat[0])\n"
] |
[
[
"sklearn.datasets.make_classification",
"sklearn.model_selection.cross_val_score",
"numpy.asarray",
"numpy.std",
"sklearn.model_selection.RepeatedStratifiedKFold",
"numpy.mean"
]
] |
danielmoreira/medifor
|
[
"7d2cd460e3a712d716d288442b2d3dc85d081e9f"
] |
[
"python/examples/provenance/filter_svc/notredame/facade.py"
] |
[
"# Implements a facade to call Notre Dame's fitlering implementation.\n\nimport sys\nimport os\nimport numpy\nimport cv2\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/image_lib/') # image library path\nimport image_reader\nimport image_descriptor\nimport image_matcher\n\n\n# Reads an image, given its file path.\n# Returns the obtained image.\ndef read_image(image_file_path):\n image = image_reader.read(image_file_path)\n if image is None:\n image = numpy.zeros((64, 64, 3), dtype=numpy.uint8) # empty image, to keep program running\n print('[WARNING] Image', image_file_path, 'has no content.')\n\n return image\n\n\n# Describes a given image.\n# Returns its interest points and respective feature vectors (descriptions).\ndef describe_image(image, mask=None):\n keypoints, descriptions = image_descriptor.surf_detect_rsift_describe(image, mask)\n\n if len(keypoints) == 0:\n return [], []\n\n return keypoints, descriptions\n\n\n# Builds an image rank containing <rank_size> images,\n# based on the given previously obtained <description_search> result.\ndef build_image_rank(description_search, rank_size):\n # builds the image rank, given the description-wise search result\n image_rank = []\n\n # rank data\n votes = {}\n labels = {}\n max_vote = 0.0\n\n # for each description-wise result\n for item in description_search:\n for element in item['value']:\n gallery_image_id = int(element[0])\n gallery_image_label = element[1]\n gallery_image_pos = int(element[2])\n\n if gallery_image_id not in votes.keys():\n votes[gallery_image_id] = 0.0\n labels[gallery_image_id] = gallery_image_label\n\n # vote is weighted by description position\n votes[gallery_image_id] = votes[gallery_image_id] + (1.0 / gallery_image_pos)\n\n if votes[gallery_image_id] > max_vote:\n max_vote = votes[gallery_image_id]\n\n # adds content to the image rank\n for gallery_image_id in votes.keys():\n score = votes[gallery_image_id] / max_vote\n image_rank.append((gallery_image_id, labels[gallery_image_id], score))\n\n # sorts, trims, and returns the image rank\n image_rank = sorted(image_rank, key=lambda x: x[2], reverse=True)[:rank_size]\n return image_rank\n\n\n# Obtains the context mask between the given query and the given gallery image.\n# Parameters:\n# <query_image> - The query image whose content will be masked by the computed context mask.\n# <query_keypoints> - The previously detected query keypoints.\n# <query_descriptions> - The description of the query keypoints.\n# <gallery_image> - The gallery image whose content will be compared to the query.\n# <gallery_image_keypoints> - The previously detected gallery image keypoints.\n# <gallery_image_descriptions> - The description of the gallery image keypoints.\n# Returns the context mask, which highlights the content differences between the query and the gallery image,\n# after keypoint-based image alignment. 
The returned mask refers to the query content and therefore has it size.\n# Returns None if no context mask could be computed (i.e., there were not enough matches between the keypoints of\n# the query and of the gallery image.\ndef compute_context_mask(query_image, query_keypoints, query_descriptions,\n gallery_image, gallery_image_keypoints, gallery_image_descriptions):\n # obtains the geometrically consistent matches between the query and gallery image\n good_matches = image_matcher.match(gallery_image_keypoints, gallery_image_descriptions,\n query_keypoints, query_descriptions)\n\n # if there are enough matches for homography\n if len(good_matches) >= 4:\n return image_matcher.compute_context_mask(gallery_image_keypoints, gallery_image,\n query_keypoints, query_image,\n good_matches)\n else:\n return None, None # no mask could be computed\n\n\n# Combines a given set of context masks into a single one.\n# Parameter:\n# <context_mask_list> - The list of context masks.\n# Returns the OR-merged mask.\ndef merge_context_masks(context_mask_list):\n if len(context_mask_list) > 0:\n output = context_mask_list[0]\n for i in range(1, len(context_mask_list)):\n output = cv2.bitwise_or(output, context_mask_list[i])\n return output\n\n else:\n return None\n\n\n# Merges a given set of image ranks.\n# <image_ranks> - Takes a list of image ranks and combines them into a single one.\n# <rank_size> - Maximum size of the obtained image rank.\n# Returns the obtained image rank.\ndef merge_ranks(image_ranks, rank_size=500):\n if len(image_ranks) == 0:\n return []\n\n # holds the votes for each gallery image\n rank_dict = {}\n\n for i in range(len(image_ranks)):\n for item in image_ranks[i]:\n item_key = item[1].split('/')[-1] # reg and flp have the same key\n item_score = item[2]\n if i > 0:\n item_score = item_score / 2.0 # tier-1 has higher weight\n\n # if the current item is not in the dictionary yet,\n # or its respective dictionary score is below the current one,\n # updates the dictionary with the current item\n if item_key not in rank_dict or rank_dict[item_key][2] < item_score:\n rank_dict[item_key] = (item[0], item[1], item_score)\n\n # computes the final rank\n image_rank = []\n for key in rank_dict.keys():\n image_rank.append(rank_dict[key])\n\n # sorts and trims the image rank\n image_rank = sorted(image_rank, key=lambda x: x[2], reverse=True)[:rank_size]\n return image_rank\n"
] |
[
[
"numpy.zeros"
]
] |
ddajing/multilayer-cnn-text-classification
|
[
"ea97105de2e4411eb492e0c268045006a9a3669f"
] |
[
"data_helpers.py"
] |
[
"import numpy as np\r\nimport re\r\nfrom collections import defaultdict\r\n\r\ndef clean_str(string, TREC=False):\r\n \"\"\"\r\n Tokenization/string cleaning for all datasets except for SST.\r\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\r\n \"\"\"\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \r\n string = re.sub(r\"\\'s\", \" \\'s\", string) \r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string) \r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string) \r\n string = re.sub(r\"\\'re\", \" \\'re\", string) \r\n string = re.sub(r\"\\'d\", \" \\'d\", string) \r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string) \r\n string = re.sub(r\",\", \" , \", string) \r\n string = re.sub(r\"!\", \" ! \", string) \r\n string = re.sub(r\"\\(\", \" \\( \", string) \r\n string = re.sub(r\"\\)\", \" \\) \", string) \r\n string = re.sub(r\"\\?\", \" \\? \", string) \r\n string = re.sub(r\"\\s{2,}\", \" \", string) \r\n return string.strip().lower()\r\n\r\ndef load_data(neg_file, pos_file):\r\n \"\"\"\r\n Load tokenized input, labels and build vocabulary dict\r\n \"\"\"\r\n\r\n x = []\r\n y = []\r\n vocab = defaultdict(float)\r\n \r\n with open(neg_file, \"r\", encoding = \"utf-8\") as file:\r\n for line in file:\r\n sent = clean_str(line).split()\r\n words = set(sent)\r\n for word in words:\r\n vocab[word] += 1\r\n x.append(sent)\r\n y.append([1,0])\r\n \r\n with open(pos_file, \"r\", encoding = \"utf-8\") as file:\r\n for line in file:\r\n sent = clean_str(line).split()\r\n words = set(sent)\r\n for word in words:\r\n vocab[word] += 1\r\n x.append(sent)\r\n y.append([0,1])\r\n \r\n return x, y, vocab\r\n \r\ndef load_word2vec(w2v_file, vocab):\r\n \"\"\"\r\n Load pretrained word vecs from Google word2vec\r\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\r\n \"\"\"\r\n word_vecs = {}\r\n with open(w2v_file, \"rb\") as file:\r\n header = file.readline()\r\n vocab_size, embedding_size = map(int, header.split())\r\n binary_len = np.dtype(\"float32\").itemsize * embedding_size\r\n for line in range(vocab_size):\r\n #break\r\n word = b\"\"\r\n while True:\r\n char = file.read(1)\r\n if char == \" \".encode():\r\n break\r\n if char != '\\n'.encode():\r\n word = word + char\r\n word = word.decode()\r\n if word in vocab:\r\n word_vecs[word] = np.fromstring(file.read(binary_len), dtype = \"float32\")\r\n else:\r\n file.read(binary_len)\r\n if len(word_vecs) == len(vocab):\r\n break\r\n return word_vecs\r\n\r\ndef add_unknown_words(word_vecs, vocab, embedding_size = 300):\r\n \"\"\"\r\n Add random vecs for word don't appear in pretrained vecs.\r\n \"\"\"\r\n for word in vocab:\r\n if word not in word_vecs:\r\n word_vecs[word] = np.random.uniform(-0.25, 0.25, embedding_size)\r\n\r\n\r\ndef get_pretrained_embedding_filter(w2v, embedding_size = 300):\r\n \"\"\"\r\n Build pretrained embedding filter \r\n and word2index matrix for mapping word to index in filter respectivevly\r\n \"\"\"\r\n W = []\r\n word2index = defaultdict(int)\r\n W.append(np.zeros([embedding_size]))\r\n index = 1\r\n for word in w2v:\r\n word2index[word] = index\r\n W.append(w2v[word])\r\n index += 1\r\n return word2index, np.asarray(W).astype(\"float32\")\r\n\r\ndef index_data(x, word2index):\r\n x_indexed = []\r\n max_length = max([len(sent) for sent in x])\r\n for sent in x:\r\n sent_indexed = [word2index[word] for word in sent]\r\n while len(sent_indexed) < max_length:\r\n sent_indexed.append(0)\r\n x_indexed.append(sent_indexed)\r\n return 
x_indexed\r\n\r\ndef split_data(x, y, devset_percentage):\r\n \"\"\"\r\n Shuffled and split data into training set and developement set.\r\n \"\"\"\r\n shuffled_indices = np.random.permutation(np.arange(len(y)))\r\n x_shuffled = np.asarray(x)[shuffled_indices]\r\n y_shuffled = np.asarray(y)[shuffled_indices]\r\n split_index = int(-1 * devset_percentage * len(y))\r\n x_train, x_dev = x_shuffled[:split_index], x_shuffled[split_index:]\r\n y_train, y_dev = y_shuffled[:split_index], y_shuffled[split_index:]\r\n return x_train, y_train, x_dev, y_dev\r\n"
] |
[
[
"numpy.asarray",
"numpy.random.uniform",
"numpy.zeros",
"numpy.dtype"
]
] |
europa1610/ALPR-Project
|
[
"6c101b16dd3082bfcf5284efb65f4adc7d44a3df"
] |
[
"utils/roi_pooling.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.autograd as ag\nfrom torch.autograd.function import Function\n#from torch._thnn import type2backend\n\n'''\nclass AdaptiveMaxPool2d(Function):\n def __init__(self, out_w, out_h):\n super(AdaptiveMaxPool2d, self).__init__()\n self.out_w = out_w\n self.out_h = out_h\n\n def forward(self, input):\n output = input.new()\n indices = input.new().long()\n self.save_for_backward(input)\n self.indices = indices\n self._backend = type2backend[type(input)]\n self._backend.SpatialAdaptiveMaxPooling_updateOutput(\n self._backend.library_state, input, output, indices,\n self.out_w, self.out_h)\n return output\n\n def backward(self, grad_output):\n input, = self.saved_tensors\n indices = self.indices\n grad_input = grad_output.new()\n self._backend.SpatialAdaptiveMaxPooling_updateGradInput(\n self._backend.library_state, input, grad_output, grad_input,\n indices)\n return grad_input, None\n\n'''\n\ndef adaptive_max_pool(input, size):\n return nn.AdaptiveMaxPool2d((size[0], size[1]))(input)\n\n\n\ndef roi_pooling(input, rois, size=(7, 7), spatial_scale=1.0):\n assert (rois.dim() == 2)\n assert (rois.size(1) == 5)\n output = []\n rois = rois.data.float()\n num_rois = rois.size(0)\n\n rois[:, 1:].mul_(spatial_scale)\n rois = rois.long()\n for i in range(num_rois):\n roi = rois[i]\n im_idx = roi[0]\n # im = input.narrow(0, im_idx, 1)\n im = input.narrow(0, im_idx, 1)[..., roi[2]:(roi[4] + 1), roi[1]:(roi[3] + 1)]\n output.append(adaptive_max_pool(im, size))\n\n return torch.cat(output, 0)\n\n\ndef roi_pooling_ims(input, rois, output_size=(7, 7), spatial_scale=1.0):\n # written for one roi one image\n # size: (w, h)\n #print('Roi Pooling')\n assert (rois.dim() == 2)\n assert len(input) == len(rois)\n assert (rois.size(1) == 4)\n output = []\n rois = rois.data.float()\n num_rois = rois.size(0)\n\n rois[:, 1:].mul_(spatial_scale)\n rois = rois.long()\n for i in range(num_rois):\n roi = rois[i]\n # im = input.narrow(0, im_idx, 1)\n im = input.narrow(0, i, 1)[..., roi[1]:(roi[3] + 1), roi[0]:(roi[2] + 1)]\n ad_mp = adaptive_max_pool(im, output_size)\n #asd = [len(a) for a in ad_mp]\n #print(len(ad_mp), input.size())\n #print(asd)\n output.append(ad_mp)#adaptive_max_pool(im, size))\n\n #print(type(output))\n #print(output)\n #print(len(output[0]))\n\n #print(torch.Tensor(output[0]).size())\n return torch.cat(output, 0)\n"
] |
[
[
"torch.nn.AdaptiveMaxPool2d",
"torch.cat"
]
] |
tobiasraabe/crypto
|
[
"5b40049169cfbf02f4979a55e8abdb77b834b820"
] |
[
"src/transformers/financial_indicators.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"This module provides many commonly used financial indicators.financial\n\nThe design of the classes has to be in line with the base classes from sklearn.\nThen, the classes can be used as parts of scikit-learn pipeline.\n\n\"\"\"\n\nimport numpy as np\n\nfrom sklearn.base import TransformerMixin\nfrom ..utils.statistics import moving_average\n\n\nclass MovingAverages(TransformerMixin):\n \"\"\"This class implements a *scikit-learn* transformer class which will\n produce moving averages for a given ``x``.\n\n\n Parameters\n ----------\n windows : arr\n Array containing interval sizes\n\n Todo\n ----\n - Find better replacement of ``np.NaN`` in :func:`def transform` than 0\n\n \"\"\"\n\n def __init__(self, windows=[20, 50]):\n self.windows = windows\n\n def fit(self, x, y=None):\n \"\"\"Since this method needs no fitting, this function is almost empty.\n ``return self`` allows to use a\n `*Fluent Interface* https://en.wikipedia.org/wiki/Fluent_interface`_.\n Note that you have to provide an array.\n\n\n Parameters\n ----------\n x : matrix/array\n Data which will be transformed\n\n\n Example\n -------\n A fluent interface ensures that ``ma`` after calling ``ma.fit()`` is\n not ``None``:\n ````\n >>> ma = MovingAverages()\n >>> ma = ma.fit()\n >>> print(ma)\n <src.prediction_models.moving_average.MovingAverage object at 0x00000>\n ````\n\n \"\"\"\n return self\n\n def transform(self, x, y=None):\n # Initialise container\n if x.ndim == 1:\n arr = np.empty(shape=(len(self.windows), x.shape[0]))\n else:\n arr = np.empty(\n shape=(x.shape[0] * len(self.windows), x.shape[1]))\n # Check whether a matrix or an array is passed and create moving\n # averages\n if x.ndim == 1:\n if len(self.windows) == 1:\n arr = moving_average(x, window=self.windows[0])\n else:\n for i, window in enumerate(self.windows):\n arr[i] = moving_average(x, window=window)\n else:\n for x_ in x:\n for j, window in enumerate(self.windows):\n arr[j] = moving_average(x_, window=window)\n # Convert ``np.NaN`` to numerics\n arr = np.nan_to_num(arr)\n\n return arr\n\n\nclass LaggedTerms(TransformerMixin):\n\n def __init__(self, number_of_lags=5):\n self.number_of_lags = number_of_lags\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, x, y=None):\n pass\n"
] |
[
[
"numpy.nan_to_num"
]
] |
SiriusKY/pytorch-ocr
|
[
"c739a13116c6833e8dca3be4bc7b66fc757328b4"
] |
[
"test.py"
] |
[
"import argparse\nimport torch\nfrom tqdm import tqdm\nimport data_loader.data_loaders as module_data\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nfrom parse_config import ConfigParser\n\n\ndef main(config):\n logger = config.get_logger('test')\n\n # setup data_loader instances\n data_loader = getattr(module_data, config['data_loader']['type'])(\n config['data_loader']['args']['data_dir'],\n batch_size=512,\n shuffle=False,\n validation_split=0.0,\n training=False,\n num_workers=2\n )\n\n # build model architecture\n model = config.init_obj('arch', module_arch)\n logger.info(model)\n\n # get function handles of loss and metrics\n loss_fn = getattr(module_loss, config['loss'])\n metric_fns = [getattr(module_metric, met) for met in config['metrics']]\n\n logger.info('Loading checkpoint: {} ...'.format(config.resume))\n checkpoint = torch.load(config.resume)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n\n # prepare model for testing\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n\n total_loss = 0.0\n total_metrics = torch.zeros(len(metric_fns))\n\n with torch.no_grad():\n for i, (data, target) in enumerate(tqdm(data_loader)):\n data, target = data.to(device), target.to(device)\n output = model(data)\n\n #\n # save sample images, or do something with output here\n #\n\n # computing loss, metrics on test set\n loss = loss_fn(output, target)\n batch_size = data.shape[0]\n total_loss += loss.item() * batch_size\n for i, metric in enumerate(metric_fns):\n total_metrics[i] += metric(output, target) * batch_size\n\n n_samples = len(data_loader.sampler)\n log = {'loss': total_loss / n_samples}\n log.update({\n met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)\n })\n logger.info(log)\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='test')\n args.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n config = ConfigParser.from_args(args)\n main(config)\n"
] |
[
[
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] |
ClaudsFalse/neural_machine_translation
|
[
"253c736af32552d0be5fecd05eb801a776c17f5c"
] |
[
"nmt_toolkit/seq2seq/models/lstm.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom seq2seq import utils\nfrom seq2seq.models import Seq2SeqModel, Seq2SeqEncoder, Seq2SeqDecoder\nfrom seq2seq.models import register_model, register_model_architecture\n\n\n@register_model('lstm')\nclass LSTMModel(Seq2SeqModel):\n \"\"\" Defines the sequence-to-sequence model class. \"\"\"\n\n def __init__(self,\n encoder,\n decoder):\n\n super().__init__(encoder, decoder)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--encoder-embed-dim', type=int, help='encoder embedding dimension')\n parser.add_argument('--encoder-embed-path', help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-hidden-size', type=int, help='encoder hidden size')\n parser.add_argument('--encoder-num-layers', type=int, help='number of encoder layers')\n parser.add_argument('--encoder-bidirectional', help='bidirectional encoder')\n parser.add_argument('--encoder-dropout-in', help='dropout probability for encoder input embedding')\n parser.add_argument('--encoder-dropout-out', help='dropout probability for encoder output')\n\n parser.add_argument('--decoder-embed-dim', type=int, help='decoder embedding dimension')\n parser.add_argument('--decoder-embed-path', help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-hidden-size', type=int, help='decoder hidden size')\n parser.add_argument('--decoder-num-layers', type=int, help='number of decoder layers')\n parser.add_argument('--decoder-dropout-in', type=float, help='dropout probability for decoder input embedding')\n parser.add_argument('--decoder-dropout-out', type=float, help='dropout probability for decoder output')\n parser.add_argument('--decoder-use-attention', help='decoder attention')\n parser.add_argument('--decoder-use-lexical-model', help='toggle for the lexical model')\n\n @classmethod\n def build_model(cls, args, src_dict, tgt_dict):\n \"\"\" Constructs the model. \"\"\"\n base_architecture(args)\n encoder_pretrained_embedding = None\n decoder_pretrained_embedding = None\n\n # Load pre-trained embeddings, if desired\n if args.encoder_embed_path:\n encoder_pretrained_embedding = utils.load_embedding(args.encoder_embed_path, src_dict)\n if args.decoder_embed_path:\n decoder_pretrained_embedding = utils.load_embedding(args.decoder_embed_path, tgt_dict)\n\n # Construct the encoder\n encoder = LSTMEncoder(dictionary=src_dict,\n embed_dim=args.encoder_embed_dim,\n hidden_size=args.encoder_hidden_size,\n num_layers=args.encoder_num_layers,\n bidirectional=args.encoder_bidirectional,\n dropout_in=args.encoder_dropout_in,\n dropout_out=args.encoder_dropout_out,\n pretrained_embedding=encoder_pretrained_embedding)\n\n # Construct the decoder\n decoder = LSTMDecoder(dictionary=tgt_dict,\n embed_dim=args.decoder_embed_dim,\n hidden_size=args.decoder_hidden_size,\n num_layers=args.decoder_num_layers,\n dropout_in=args.decoder_dropout_in,\n dropout_out=args.decoder_dropout_out,\n pretrained_embedding=decoder_pretrained_embedding,\n use_attention=bool(eval(args.decoder_use_attention)),\n use_lexical_model=bool(eval(args.decoder_use_lexical_model)))\n return cls(encoder, decoder)\n\n\nclass LSTMEncoder(Seq2SeqEncoder):\n \"\"\" Defines the encoder class. 
\"\"\"\n\n def __init__(self,\n dictionary,\n embed_dim=64,\n hidden_size=64,\n num_layers=1,\n bidirectional=True,\n dropout_in=0.25,\n dropout_out=0.25,\n pretrained_embedding=None):\n\n super().__init__(dictionary)\n\n self.dropout_in = dropout_in\n self.dropout_out = dropout_out\n self.bidirectional = bidirectional\n self.hidden_size = hidden_size\n self.output_dim = 2 * hidden_size if bidirectional else hidden_size\n\n if pretrained_embedding is not None:\n self.embedding = pretrained_embedding\n else:\n self.embedding = nn.Embedding(len(dictionary), embed_dim, dictionary.pad_idx)\n\n dropout_lstm = dropout_out if num_layers > 1 else 0.\n self.lstm = nn.LSTM(input_size=embed_dim,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout_lstm,\n bidirectional=bidirectional)\n\n def forward(self, src_tokens, src_lengths):\n \"\"\" Performs a single forward pass through the instantiated encoder sub-network. \"\"\"\n # Embed tokens and apply dropout\n batch_size, src_time_steps = src_tokens.size()\n src_embeddings = self.embedding(src_tokens)\n _src_embeddings = F.dropout(src_embeddings, p=self.dropout_in, training=self.training)\n\n # Transpose batch: [batch_size, src_time_steps, num_features] -> [src_time_steps, batch_size, num_features]\n src_embeddings = _src_embeddings.transpose(0, 1)\n\n # Pack embedded tokens into a PackedSequence\n packed_source_embeddings = nn.utils.rnn.pack_padded_sequence(src_embeddings, src_lengths)\n\n # Pass source input through the recurrent layer(s)\n packed_outputs, (final_hidden_states, final_cell_states) = self.lstm(packed_source_embeddings)\n\n # Unpack LSTM outputs and optionally apply dropout (dropout currently disabled)\n lstm_output, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs, padding_value=0.)\n lstm_output = F.dropout(lstm_output, p=self.dropout_out, training=self.training)\n assert list(lstm_output.size()) == [src_time_steps, batch_size, self.output_dim] # sanity check\n\n '''\n ___QUESTION-1-DESCRIBE-A-START___\n Describe what happens when self.bidirectional is set to True. \n What is the difference between final_hidden_states and final_cell_states?\n '''\n if self.bidirectional:\n def combine_directions(outs):\n return torch.cat([outs[0: outs.size(0): 2], outs[1: outs.size(0): 2]], dim=2)\n final_hidden_states = combine_directions(final_hidden_states)\n final_cell_states = combine_directions(final_cell_states)\n '''___QUESTION-1-DESCRIBE-A-END___'''\n\n # Generate mask zeroing-out padded positions in encoder inputs\n src_mask = src_tokens.eq(self.dictionary.pad_idx)\n return {'src_embeddings': _src_embeddings.transpose(0, 1),\n 'src_out': (lstm_output, final_hidden_states, final_cell_states),\n 'src_mask': src_mask if src_mask.any() else None}\n\n\nclass AttentionLayer(nn.Module):\n \"\"\" Defines the attention layer class. Uses Luong's global attention with the general scoring function. 
\"\"\"\n def __init__(self, input_dims, output_dims):\n super().__init__()\n # Scoring method is 'general'\n self.src_projection = nn.Linear(input_dims, output_dims, bias=False)\n self.context_plus_hidden_projection = nn.Linear(input_dims + output_dims, output_dims, bias=False)\n\n def forward(self, tgt_input, encoder_out, src_mask):\n # tgt_input has shape = [batch_size, input_dims]\n # encoder_out has shape = [src_time_steps, batch_size, output_dims]\n # src_mask has shape = [src_time_steps, batch_size]\n\n # Get attention scores\n encoder_out = encoder_out.transpose(1, 0)\n attn_scores = self.score(tgt_input, encoder_out)\n\n '''\n ___QUESTION-1-DESCRIBE-B-START___\n Describe how the attention context vector is calculated. Why do we need to apply a mask to the attention scores?\n '''\n if src_mask is not None:\n src_mask = src_mask.unsqueeze(dim=1)\n attn_scores.masked_fill_(src_mask, float('-inf'))\n attn_weights = F.softmax(attn_scores, dim=-1)\n attn_context = torch.bmm(attn_weights, encoder_out).squeeze(dim=1)\n context_plus_hidden = torch.cat([tgt_input, attn_context], dim=1)\n attn_out = torch.tanh(self.context_plus_hidden_projection(context_plus_hidden))\n '''___QUESTION-1-DESCRIBE-B-END___'''\n\n return attn_out, attn_weights.squeeze(dim=1)\n\n def score(self, tgt_input, encoder_out):\n \"\"\" Computes attention scores. \"\"\"\n\n '''\n ___QUESTION-1-DESCRIBE-C-START___\n How are attention scores calculated? What role does matrix multiplication (i.e. torch.bmm()) play \n in aligning encoder and decoder representations?\n '''\n projected_encoder_out = self.src_projection(encoder_out).transpose(2, 1)\n attn_scores = torch.bmm(tgt_input.unsqueeze(dim=1), projected_encoder_out)\n '''___QUESTION-1-DESCRIBE-C-END___'''\n\n return attn_scores\n\n\nclass LSTMDecoder(Seq2SeqDecoder):\n \"\"\" Defines the decoder class. \"\"\"\n\n def __init__(self,\n dictionary,\n embed_dim=64,\n hidden_size=128,\n num_layers=1,\n dropout_in=0.25,\n dropout_out=0.25,\n pretrained_embedding=None,\n use_attention=True,\n use_lexical_model=False):\n\n super().__init__(dictionary)\n\n self.dropout_in = dropout_in\n self.dropout_out = dropout_out\n self.embed_dim = embed_dim\n self.hidden_size = hidden_size\n\n if pretrained_embedding is not None:\n self.embedding = pretrained_embedding\n else:\n self.embedding = nn.Embedding(len(dictionary), embed_dim, dictionary.pad_idx)\n\n # Define decoder layers and modules\n self.attention = AttentionLayer(hidden_size, hidden_size) if use_attention else None\n\n self.layers = nn.ModuleList([nn.LSTMCell(\n input_size=hidden_size + embed_dim if layer == 0 else hidden_size,\n hidden_size=hidden_size)\n for layer in range(num_layers)])\n\n self.final_projection = nn.Linear(hidden_size, len(dictionary))\n\n self.use_lexical_model = use_lexical_model\n if self.use_lexical_model:\n # __QUESTION: Add parts of decoder architecture corresponding to the LEXICAL MODEL here\n pass\n # TODO: --------------------------------------------------------------------- /CUT\n\n def forward(self, tgt_inputs, encoder_out, incremental_state=None):\n \"\"\" Performs the forward pass through the instantiated model. 
\"\"\"\n # Optionally, feed decoder input token-by-token\n if incremental_state is not None:\n tgt_inputs = tgt_inputs[:, -1:]\n\n # __QUESTION : Following code is to assist with the LEXICAL MODEL implementation\n # Recover encoder input\n src_embeddings = encoder_out['src_embeddings']\n\n src_out, src_hidden_states, src_cell_states = encoder_out['src_out']\n src_mask = encoder_out['src_mask']\n src_time_steps = src_out.size(0)\n\n # Embed target tokens and apply dropout\n batch_size, tgt_time_steps = tgt_inputs.size()\n tgt_embeddings = self.embedding(tgt_inputs)\n tgt_embeddings = F.dropout(tgt_embeddings, p=self.dropout_in, training=self.training)\n\n # Transpose batch: [batch_size, tgt_time_steps, num_features] -> [tgt_time_steps, batch_size, num_features]\n tgt_embeddings = tgt_embeddings.transpose(0, 1)\n\n # Initialize previous states (or retrieve from cache during incremental generation)\n '''\n ___QUESTION-1-DESCRIBE-D-START___\n Describe how the decoder state is initialized. When is cached_state == None? What role does input_feed play?\n '''\n cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')\n if cached_state is not None:\n tgt_hidden_states, tgt_cell_states, input_feed = cached_state\n else:\n tgt_hidden_states = [torch.zeros(tgt_inputs.size()[0], self.hidden_size) for i in range(len(self.layers))]\n tgt_cell_states = [torch.zeros(tgt_inputs.size()[0], self.hidden_size) for i in range(len(self.layers))]\n input_feed = tgt_embeddings.data.new(batch_size, self.hidden_size).zero_()\n '''___QUESTION-1-DESCRIBE-D-END___'''\n\n # Initialize attention output node\n attn_weights = tgt_embeddings.data.new(batch_size, tgt_time_steps, src_time_steps).zero_()\n rnn_outputs = []\n\n # __QUESTION : Following code is to assist with the LEXICAL MODEL implementation\n # Cache lexical context vectors per translation time-step\n lexical_contexts = []\n\n for j in range(tgt_time_steps):\n # Concatenate the current token embedding with output from previous time step (i.e. 'input feeding')\n lstm_input = torch.cat([tgt_embeddings[j, :, :], input_feed], dim=1)\n\n for layer_id, rnn_layer in enumerate(self.layers):\n # Pass target input through the recurrent layer(s)\n tgt_hidden_states[layer_id], tgt_cell_states[layer_id] = \\\n rnn_layer(lstm_input, (tgt_hidden_states[layer_id], tgt_cell_states[layer_id]))\n\n # Current hidden state becomes input to the subsequent layer; apply dropout\n lstm_input = F.dropout(tgt_hidden_states[layer_id], p=self.dropout_out, training=self.training)\n\n '''\n ___QUESTION-1-DESCRIBE-E-START___\n How is attention integrated into the decoder? Why is the attention function given the previous \n target state as one of its inputs? 
What is the purpose of the dropout layer?\n '''\n if self.attention is None:\n input_feed = tgt_hidden_states[-1]\n else:\n input_feed, step_attn_weights = self.attention(tgt_hidden_states[-1], src_out, src_mask)\n attn_weights[:, j, :] = step_attn_weights\n\n if self.use_lexical_model:\n # __QUESTION: Compute and collect LEXICAL MODEL context vectors here\n # TODO: --------------------------------------------------------------------- CUT\n pass\n # TODO: --------------------------------------------------------------------- /CUT\n\n input_feed = F.dropout(input_feed, p=self.dropout_out, training=self.training)\n rnn_outputs.append(input_feed)\n '''___QUESTION-1-DESCRIBE-E-END___'''\n\n # Cache previous states (only used during incremental, auto-regressive generation)\n utils.set_incremental_state(\n self, incremental_state, 'cached_state', (tgt_hidden_states, tgt_cell_states, input_feed))\n\n # Collect outputs across time steps\n decoder_output = torch.cat(rnn_outputs, dim=0).view(tgt_time_steps, batch_size, self.hidden_size)\n\n # Transpose batch back: [tgt_time_steps, batch_size, num_features] -> [batch_size, tgt_time_steps, num_features]\n decoder_output = decoder_output.transpose(0, 1)\n\n # Final projection\n decoder_output = self.final_projection(decoder_output)\n\n if self.use_lexical_model:\n # __QUESTION: Incorporate the LEXICAL MODEL into the prediction of target tokens here\n pass\n # TODO: --------------------------------------------------------------------- /CUT\n\n return decoder_output, attn_weights\n\n\n@register_model_architecture('lstm', 'lstm')\ndef base_architecture(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 64)\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', 64)\n args.encoder_num_layers = getattr(args, 'encoder_num_layers', 1)\n args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', 'True')\n args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', 0.25)\n args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0.25)\n\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 64)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', 128)\n args.decoder_num_layers = getattr(args, 'decoder_num_layers', 1)\n args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', 0.25)\n args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', 0.25)\n args.decoder_use_attention = getattr(args, 'decoder_use_attention', 'True')\n args.decoder_use_lexical_model = getattr(args, 'decoder_use_lexical_model', 'False')\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.nn.functional.dropout",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.LSTMCell",
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.bmm"
]
] |
Vaderico/LSTM-Camera-Game
|
[
"3ccc05f2f6ea5c06eb36f4e5a755dca51d913bcc"
] |
[
"model.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\nimport sys\n\nclass LSTMController(nn.Module):\n def __init__(self, hidden_dim, middle_out_dim):\n super(LSTMController, self).__init__()\n self.hidden_dim = hidden_dim\n\n self.init_resnet()\n res18_out_dim = 512 * 7 * 7\n # res18_out_dim = 512\n self.middle_out_dim = middle_out_dim\n\n self.middle = nn.Linear(res18_out_dim, self.middle_out_dim)\n\n self.lstm = nn.LSTM(self.middle_out_dim, self.hidden_dim)\n self.hiddenToVel = nn.Linear(self.hidden_dim, 2)\n self.init_hidden()\n\n def init_resnet(self):\n self.res18_model = models.resnet18(pretrained=True)\n self.res18_model = nn.Sequential(*list(self.res18_model.children())[:-2]).cuda()\n\n # def init_resnet(self):\n # self.res18_model = models.resnet18(pretrained=True)\n # self.res18_model = nn.Sequential(*list(self.res18_model.children())[:-1]).cuda()\n\n def init_hidden(self):\n self.hidden = (torch.zeros(1, 1, self.hidden_dim).cuda(),\n torch.zeros(1, 1, self.hidden_dim).cuda())\n\n def forward(self, X):\n with torch.no_grad():\n X = self.res18_model(X.float())\n\n X = self.middle(X.view(len(X), -1))\n\n lstm_out, self.hidden = self.lstm(X.reshape(len(X), 1, -1), self.hidden)\n\n vel_space = self.hiddenToVel(lstm_out.view(len(X), -1))\n\n return vel_space\n\nif __name__ == \"__main__\" :\n print(models.__file__)\n lstm = LSTMController(500,500).cuda()\n lstm(torch.randn((100,3,244,244)).cuda())\n\n"
] |
[
[
"torch.zeros",
"torch.nn.LSTM",
"torch.randn",
"torch.nn.Linear",
"torch.no_grad"
]
] |
thomasw21/DeepSpeed
|
[
"99bd592d253c6c2e995c49978c4444f989cfd7be"
] |
[
"deepspeed/moe/sharded_moe.py"
] |
[
"'''\nCopyright 2021 The Microsoft DeepSpeed Team\n'''\n# The file has been adapted from two fairscale files:\n# (1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py\n# (2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py\n# Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf\n# We retain the following license from the original files:\n\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer\nfrom typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union, cast\n\nimport time\nfrom time import perf_counter\nimport torch\nfrom torch import Tensor\nimport torch.distributed as dist\nfrom torch.nn import Module, ModuleList\n\nif TYPE_CHECKING:\n Base = Module[Tensor]\nelse:\n Base = Module\n\nuniform_map: Dict[torch.device, Callable] = {}\ngumbel_map: Dict[torch.device, Callable] = {}\nexp_selection_uniform_map: Dict[torch.device, Callable] = {}\n\n\ndef multiplicative_jitter(x, device: torch.device, epsilon=1e-2):\n \"\"\"\n Modified from switch transformer paper. mesh transformers\n Multiply values by a random number between 1-epsilon and 1+epsilon.\n Makes models more resilient to rounding errors introduced by bfloat16.\n This seems particularly important for logits.\n Args:\n x: a torch.tensor\n device: torch.device\n epsilon: a floating point value\n Returns:\n a jittered x.\n \"\"\"\n if epsilon == 0:\n return x\n uniform = uniform_map.get(device)\n if uniform is None:\n uniform = torch.distributions.uniform.Uniform(\n low=torch.tensor(1.0 - epsilon,\n device=device),\n high=torch.tensor(1.0 + epsilon,\n device=device)).rsample # type: ignore\n uniform_map[device] = uniform\n return x * uniform(x.shape)\n\n\ndef gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:\n gumbel = gumbel_map.get(device)\n if gumbel is None:\n one = torch.tensor(1.0, device=device)\n zero = torch.tensor(0.0, device=device)\n gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore\n gumbel_map[device] = gumbel\n return gumbel(shape)\n\n\nimport torch.distributed as dist\n\n# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity\n# See https://arxiv.org/pdf/2006.16668.pdf for details.\n\n\n# Based on https://github.com/pytorch/pytorch/pull/40762\nclass _AllToAll(torch.autograd.Function):\n @staticmethod\n def forward(ctx: Any,\n group: dist.ProcessGroup,\n input: Tensor) -> Tensor: # type: ignore\n ctx.group = group\n input = input.contiguous()\n output = torch.empty_like(input)\n dist.all_to_all_single(output, input, group=group)\n return output\n\n @staticmethod\n def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:\n return (None, _AllToAll.apply(ctx.group, *grad_output))\n\n\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport math\n\n# einsum rewrites are on par or more performant\n# switch can be bubbled up in future\nUSE_EINSUM = True\n\n\ndef einsum(rule, a, b):\n if USE_EINSUM:\n return torch.einsum(rule, a, b)\n elif rule == 's,se->se':\n return a.reshape(a.shape[0], -1) * b\n elif rule == 'se,sc->sec':\n return a.unsqueeze(2) * b.unsqueeze(1)\n elif rule == 'se,se->s':\n return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)\n elif rule == 'sec,sm->ecm':\n s = a.shape[0]\n e = a.shape[1]\n c = a.shape[2]\n 
m = b.shape[1]\n return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)\n elif rule == 'sec,ecm->sm':\n return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))\n elif rule == 'ks,ksm->sm':\n k = b.shape[0]\n s = b.shape[1]\n m = b.shape[2]\n # [k, s] -> [s, k] -> [s, 1, k]\n a = a.t().unsqueeze(1)\n # [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]\n b = b.reshape(k, -1).t().reshape(s, m, k)\n # bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]\n return torch.bmm(a, b.transpose(1, 2)).squeeze(2)\n else:\n return torch.einsum(rule, a, b)\n\n\ndef top1gating(logits: torch.Tensor,\n capacity_factor: float,\n min_capacity: int,\n used_token: torch.Tensor = None,\n noisy_gate_policy: Optional[str] = None) -> Tuple[Tensor,\n Tensor,\n Tensor]:\n \"\"\"Implements Top1Gating on logits.\"\"\"\n if noisy_gate_policy == 'RSample':\n logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)\n # everything is in fp32 in this function\n gates = F.softmax(logits, dim=1)\n\n # gates has shape of SE\n num_tokens = int(gates.shape[0])\n num_experts = int(gates.shape[1])\n # round-up\n capacity = math.ceil((num_tokens / num_experts) * capacity_factor)\n if capacity < min_capacity:\n capacity = min_capacity\n\n # Create a mask for 1st's expert per token\n # noisy gating\n indices1_s = torch.argmax(\n logits_w_noise if noisy_gate_policy == 'RSample' else gates,\n dim=1)\n mask1 = F.one_hot(indices1_s, num_classes=num_experts)\n\n # mask only used tokens\n if used_token is not None:\n mask1 = einsum(\"s,se->se\", used_token, mask1)\n\n # gating decisions\n exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')\n\n # Compute l_aux\n me = torch.mean(gates, dim=0)\n ce = torch.mean(mask1.float(), dim=0)\n l_aux = torch.sum(me * ce) * num_experts\n\n uniform = exp_selection_uniform_map.get(logits.device)\n if uniform is None:\n uniform = torch.distributions.uniform.Uniform(\n low=torch.tensor(0.0,\n device=logits.device),\n high=torch.tensor(1.0,\n device=logits.device)).rsample\n exp_selection_uniform_map[logits.device] = uniform\n\n mask1_rand = mask1 * uniform(mask1.shape)\n\n assert logits.shape[0] >= min_capacity, \"No. of tokens (batch-size) should be greater than min_capacity. 
Either set min_capacity to 0 or increase your batch size.\"\n\n _, top_idx = torch.topk(mask1_rand, k=capacity, dim=0)\n\n new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)\n\n # Compute locations in capacity buffer\n locations1 = torch.cumsum(new_mask1, dim=0) - 1\n\n # Store the capacity location for each token\n locations1_s = torch.sum(locations1 * new_mask1, dim=1)\n\n # Normalize gate probabilities\n mask1_float = new_mask1.float()\n gates = gates * mask1_float\n\n locations1_sc = F.one_hot(locations1_s, num_classes=capacity).float()\n combine_weights = einsum(\"se,sc->sec\", gates, locations1_sc)\n\n dispatch_mask = combine_weights.bool()\n\n return l_aux, combine_weights, dispatch_mask, exp_counts\n\n\ndef top2gating(logits: torch.Tensor,\n capacity_factor: float) -> Tuple[Tensor,\n Tensor,\n Tensor]:\n \"\"\"Implements Top2Gating on logits.\"\"\"\n # everything is in fp32 in this function\n # logits_fp32 = logits.to(torch.float32)\n gates = F.softmax(logits, dim=1)\n\n # gates has shape of SE\n num_tokens = int(gates.shape[0])\n num_experts = int(gates.shape[1])\n # capacity = (2 * num_tokens // num_experts) * capacity_factor\n # round-up\n capacity = math.ceil((2 * num_tokens / num_experts) * capacity_factor)\n\n # Create a mask for 1st's expert per token\n indices1_s = torch.argmax(gates, dim=1)\n mask1 = F.one_hot(indices1_s, num_classes=num_experts)\n\n # Create a mask for 2nd's expert per token using Gumbel-max trick\n # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/\n logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)\n # Replace top-expert with min value\n logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\"))\n indices2_s = torch.argmax(logits_except1, dim=1)\n mask2 = F.one_hot(indices2_s, num_classes=num_experts)\n\n # Compute locations in capacity buffer\n locations1 = torch.cumsum(mask1, dim=0) - 1\n locations2 = torch.cumsum(mask2, dim=0) - 1\n # Update 2nd's location by accounting for locations of 1st\n locations2 += torch.sum(mask1, dim=0, keepdim=True)\n\n # gating decisions\n exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')\n\n # Compute l_aux\n me = torch.mean(gates, dim=0)\n ce = torch.mean(mask1.float(), dim=0)\n l_aux = torch.mean(me * ce) * num_experts * num_experts\n\n # Remove locations outside capacity from mask\n mask1 *= torch.lt(locations1, capacity)\n mask2 *= torch.lt(locations2, capacity)\n\n # Store the capacity location for each token\n locations1_s = torch.sum(locations1 * mask1, dim=1)\n locations2_s = torch.sum(locations2 * mask2, dim=1)\n\n # Normalize gate probabilities\n mask1_float = mask1.float()\n mask2_float = mask2.float()\n gates1_s = einsum(\"se,se->s\", gates, mask1_float)\n gates2_s = einsum(\"se,se->s\", gates, mask2_float)\n denom_s = gates1_s + gates2_s\n # Avoid divide-by-zero\n denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)\n gates1_s /= denom_s\n gates2_s /= denom_s\n\n # Calculate combine_weights and dispatch_mask\n gates1 = einsum(\"s,se->se\", gates1_s, mask1_float)\n gates2 = einsum(\"s,se->se\", gates2_s, mask2_float)\n locations1_sc = F.one_hot(locations1_s, num_classes=capacity).float()\n locations2_sc = F.one_hot(locations2_s, num_classes=capacity).float()\n combine1_sec = einsum(\"se,sc->sec\", gates1, locations1_sc)\n combine2_sec = einsum(\"se,sc->sec\", gates2, locations2_sc)\n combine_weights = combine1_sec + combine2_sec\n dispatch_mask = combine_weights.bool()\n\n return l_aux, combine_weights, 
dispatch_mask, exp_counts\n\n\nclass TopKGate(torch.nn.Module):\n \"\"\"Gate module which implements Top2Gating as described in Gshard_.\n ::\n\n gate = TopKGate(model_dim, num_experts)\n l_aux, combine_weights, dispatch_mask = gate(input)\n\n .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf\n\n Args:\n model_dim (int):\n size of model embedding dimension\n num_experts (ints):\n number of experts in model\n \"\"\"\n\n wg: torch.nn.Linear\n\n def __init__(self,\n model_dim: int,\n num_experts: int,\n k: int = 1,\n capacity_factor: float = 1.0,\n eval_capacity_factor: float = 1.0,\n min_capacity: int = 4,\n noisy_gate_policy: Optional[str] = None) -> None:\n super().__init__()\n\n # Only top-1 and top-2 are supported at the moment.\n if k != 1 and k != 2:\n raise ValueError('Only top-1 and top-2 gatings are supported.')\n self.wg = torch.nn.Linear(model_dim, num_experts, bias=False).float()\n self.k = k\n self.capacity_factor = capacity_factor\n self.eval_capacity_factor = eval_capacity_factor\n self.min_capacity = min_capacity\n self.noisy_gate_policy = noisy_gate_policy\n self.timers = SynchronizedWallClockTimer()\n self.wall_clock_breakdown = False\n self.gate_time = 0.0\n\n def forward(\n self,\n input: torch.Tensor,\n used_token: torch.Tensor = None\n ) -> Tuple[Tensor,\n Tensor,\n Tensor]: # type: ignore\n\n if self.wall_clock_breakdown:\n self.timers('TopKGate').start()\n\n if self.wg.weight.dtype != torch.float32:\n self.wg = self.wg.float()\n input_fp32 = input.float()\n # input jittering\n if self.noisy_gate_policy == 'Jitter' and self.training:\n input_fp32 = multiplicative_jitter(input_fp32, device=input.device)\n logits = self.wg(input_fp32)\n\n if self.k == 1:\n gate_output = top1gating(\n logits,\n self.capacity_factor if self.training else self.eval_capacity_factor,\n self.min_capacity,\n used_token,\n self.noisy_gate_policy if self.training else None)\n\n else:\n gate_output = top2gating(\n logits,\n self.capacity_factor if self.training else self.eval_capacity_factor)\n\n if self.wall_clock_breakdown:\n self.timers('TopKGate').stop()\n self.gate_time = self.timers('TopKGate').elapsed(reset=False) * 1000\n\n return gate_output\n\n\nclass MOELayer(Base):\n \"\"\"MOELayer module which implements MixtureOfExperts as described in Gshard_.\n ::\n\n gate = TopKGate(model_dim, num_experts)\n moe = MOELayer(gate, expert)\n output = moe(input)\n l_aux = moe.l_aux\n\n .. 
Gshard_: https://arxiv.org/pdf/2006.16668.pdf\n\n Args:\n gate (torch.nn.Module):\n gate network\n expert (torch.nn.Module):\n expert network\n \"\"\"\n def __init__(self,\n gate: Module,\n experts: Module,\n num_local_experts: int,\n group: Optional[Any] = None) -> None:\n super().__init__()\n self.gate = gate\n self.experts = experts\n self.group = group\n self.world_size = dist.get_world_size(group)\n self.num_local_experts = num_local_experts\n self.time_falltoall = 0.0\n self.time_salltoall = 0.0\n self.time_moe = 0.0\n self.timers = SynchronizedWallClockTimer()\n self.wall_clock_breakdown = False\n\n def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:\n\n if self.wall_clock_breakdown:\n self.timers('moe').start()\n\n # Implement Algorithm 2 from GShard paper.\n d_model = input[0].shape[-1]\n\n # Initial implementation -> Reshape into S tokens by dropping sequence dimension.\n # Reshape into G groups so that each group can distribute tokens equally\n # group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1\n reshaped_input = input[0].reshape(-1, d_model)\n\n self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1])\n\n dispatched_input = einsum(\"sec,sm->ecm\",\n dispatch_mask.type_as(input[0]),\n reshaped_input)\n\n if self.wall_clock_breakdown:\n self.timers('falltoall').start()\n\n dispatched_input = _AllToAll.apply(self.group, dispatched_input)\n\n if self.wall_clock_breakdown:\n self.timers('falltoall').stop()\n self.time_falltoall = self.timers('falltoall').elapsed(reset=False) * 1000\n\n # Re-shape after all-to-all: ecm -> gecm\n dispatched_input = dispatched_input.reshape(self.world_size,\n self.num_local_experts,\n -1,\n d_model)\n\n expert_output = self.experts(dispatched_input)\n\n if self.wall_clock_breakdown:\n self.timers('salltoall').start()\n\n expert_output = _AllToAll.apply(self.group, expert_output)\n\n if self.wall_clock_breakdown:\n self.timers('salltoall').stop()\n self.time_salltoall = self.timers('salltoall').elapsed(reset=False) * 1000\n\n # Re-shape back: gecm -> ecm\n expert_output = expert_output.reshape(self.world_size * self.num_local_experts,\n -1,\n d_model)\n\n combined_output = einsum(\"sec,ecm->sm\",\n combine_weights.type_as(input[0]),\n expert_output)\n\n a = combined_output.reshape(input[0].shape)\n\n if self.wall_clock_breakdown:\n self.timers('moe').stop()\n self.time_moe = self.timers('moe').elapsed(reset=False) * 1000\n\n return a\n"
] |
[
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.empty_like",
"torch.finfo",
"torch.distributed.all_to_all_single",
"torch.distributions.gumbel.Gumbel",
"torch.topk",
"torch.einsum",
"torch.sum",
"torch.lt",
"torch.zeros_like",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.functional.one_hot",
"torch.distributed.get_world_size",
"torch.cumsum",
"torch.argmax"
]
] |
phil-mansfield/guppy
|
[
"5394d20b83912cd072a358c38bae18a06853f419"
] |
[
"scripts/plot_phi_pts.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport palette\nfrom palette import pc\n\ndef mvir_to_rvir(mvir):\n return 0.4459216 * (mvir/1e13)**(1.0/3)\n\ndef mvir_to_vvir(mvir):\n return 310.6 * (mvir/1e13)**(1.0/3)\n\ndef main():\n palette.configure(True)\n\n file_names = [\n \"profiles/phi_pts/phi_pts_4_1.25.txt\",\n \"profiles/phi_pts/phi_pts_4_2.5.txt\",\n \"profiles/phi_pts/phi_pts_4_5.txt\",\n \"profiles/phi_pts/phi_pts_4_10.txt\",\n \"profiles/phi_pts/phi_pts_4_20.txt\"\n ]\n\n log_r, phi = np.loadtxt(file_names[0]).T\n log_r_2, phi_2 = np.loadtxt(file_names[-1]).T\n \n mvir = 5.112e+09\n vvir = mvir_to_vvir(mvir)\n rvir = mvir_to_rvir(mvir)\n\n log_r -= np.log10(rvir)\n log_r_2 -= np.log10(rvir)\n \n phi /= vvir**2\n phi_2 /= vvir**2\n\n plt.plot([-12, 5], [-12, 5], \"--\", lw=2, c=pc(\"a\"))\n plt.plot(phi, phi_2, \".\", c=\"k\")\n \n plt.xlim(-12, 5)\n plt.ylim(-12, 5)\n \n plt.xlabel(r\"$\\Phi_{\\rm true}/V_{\\rm vir}^2$\")\n plt.ylabel(r\"$\\Phi(\\delta_v = 0.8 V_{\\rm vir})/V_{\\rm vir}^2$\")\n \n plt.fill_between([0, 5], [0, 0], [-12, -12], color=pc(\"b\"), alpha=0.2)\n plt.fill_between([-12, 0], [5, 5], [0, 0], color=pc(\"r\"), alpha=0.2)\n\n plt.savefig(\"plots/fig4_vvir_boundedness.png\")\n\nif __name__ == \"__main__\": main()\n \n"
] |
[
[
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] |
chenbo123222/MegEngine
|
[
"2e8742086563ea442c357b14560245c54e0aa0a3"
] |
[
"imperative/python/test/unit/core/test_dtype_intbx.py"
] |
[
"# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\nimport pickle\n\nimport numpy as np\nimport pytest\n\nfrom megengine.core.tensor.dtype import intb1, intb2, intb4\nfrom megengine.core.tensor.raw_tensor import as_raw_tensor\n\n\ndef bit_define_test(bit, low_bit_type):\n max_value = (1 << bit) - 1\n min_value = 1 - (1 << bit)\n\n a = np.array([i for i in range(min_value, max_value + 2, 2)], dtype=low_bit_type)\n\n for i in range(max_value + 1):\n np.testing.assert_equal(a[i], i * 2 - max_value)\n np.testing.assert_equal(str(a[i]), str(i * 2 - max_value))\n\n with pytest.raises(ValueError):\n np.arange(min_value, max_value, dtype=low_bit_type)\n\n with pytest.raises(ValueError):\n np.arange(min_value - 2, max_value + 4, 2, dtype=low_bit_type)\n\n np.testing.assert_allclose(\n np.arange(min_value, 12, 2, dtype=low_bit_type),\n (np.arange((13 - min_value) // 2, dtype=np.int8) % (max_value + 1)) * 2\n - max_value,\n )\n\n np.testing.assert_allclose(\n np.arange(max_value, max_value - 20, -2, dtype=low_bit_type),\n (np.arange(max_value, max_value - 10, -1, dtype=np.int8) % (max_value + 1)) * 2\n - max_value,\n )\n\n\ndef test_define():\n bit_define_test(1, intb1)\n bit_define_test(2, intb2)\n bit_define_test(4, intb4)\n\n\ndef _bit_cast_test(bit, low_bit_type):\n dtypes = [np.int8, np.int16, np.int32, np.float32, np.float64]\n\n max_value = (1 << bit) - 1\n min_value = 1 - (1 << bit)\n for dtype in dtypes:\n np.testing.assert_allclose(\n np.arange(min_value, max_value + 2, 2, dtype=low_bit_type).astype(dtype),\n np.arange(min_value, max_value + 2, 2, dtype=dtype),\n )\n\n with pytest.raises(ValueError):\n np.array([2, 1, -1], dtype=int).astype(low_bit_type)\n with pytest.raises(ValueError):\n np.array([min_value - 2, 1, max_value + 2], dtype=int).astype(low_bit_type)\n\n\ndef test_cast():\n _bit_cast_test(1, intb1)\n _bit_cast_test(2, intb2)\n _bit_cast_test(4, intb4)\n\n\ndef _shared_nd_test(bit, low_bit_type):\n max_value = (1 << bit) - 1\n min_value = 1 - (1 << bit)\n\n data = np.arange(min_value, max_value + 2, 2, dtype=low_bit_type)\n snd = as_raw_tensor(data, dtype=low_bit_type, device=\"xpux\")\n np.testing.assert_allclose(snd.numpy(), range(min_value, max_value + 2, 2))\n\n data = np.arange(min_value, max_value + 2, 4, dtype=low_bit_type)\n snd = as_raw_tensor(data, dtype=low_bit_type, device=\"xpux\")\n np.testing.assert_allclose(snd.numpy(), range(min_value, max_value + 2, 4))\n\n\ndef test_shared_nd():\n _shared_nd_test(1, intb1)\n _shared_nd_test(2, intb2)\n _shared_nd_test(4, intb4)\n\n\ndef test_pickle():\n x = np.ascontiguousarray(np.random.randint(2, size=8192) * 2 - 1, dtype=intb1)\n pkl = pickle.dumps(x, pickle.HIGHEST_PROTOCOL)\n y = pickle.loads(pkl)\n assert x.dtype is y.dtype\n np.testing.assert_allclose(x.astype(np.float32), y.astype(np.float32))\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.random.randint",
"numpy.testing.assert_equal"
]
] |
doronbehar/sundials
|
[
"6ddce5d90084d8d1cbb8e12bb5a4402168325efe"
] |
[
"examples/arkode/CXX_serial/plot_heat2D.py"
] |
[
"#!/usr/bin/env python\n# ------------------------------------------------------------------------------\n# Programmer(s): Daniel R. Reynolds @ SMU\n# David J. Gardner @ LLNL\n# ------------------------------------------------------------------------------\n# SUNDIALS Copyright Start\n# Copyright (c) 2002-2020, Lawrence Livermore National Security\n# and Southern Methodist University.\n# All rights reserved.\n#\n# See the top-level LICENSE and NOTICE files for details.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n# SUNDIALS Copyright End\n# ------------------------------------------------------------------------------\n# matplotlib-based plotting script for the serial ark_heat2D example\n# ------------------------------------------------------------------------------\n\n# imports\nimport sys, os\nimport shlex\nimport numpy as np\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n\n# ------------------------------------------------------------------------------\n\n# read problem info file\ninfofile = 'heat2d_info.txt'\n\nwith open(infofile) as fn:\n\n # read the file line by line\n for line in fn:\n\n # split line into list\n text = shlex.split(line)\n\n # x-direction upper domian bound\n if \"xu\" in line:\n xu = float(text[1])\n continue\n\n # y-direction upper domain bound\n if \"yu\" in line:\n yu = float(text[1])\n continue\n\n # nodes in the x-direction\n if \"nx\" in line:\n nx = int(text[1])\n continue\n\n # nodes in the y-direction\n if \"ny\" in line:\n ny = int(text[1])\n continue\n\n # number of output times\n if \"nt\" in line:\n nt = int(text[1])\n continue\n\n# ------------------------------------------------------------------------------\n\n# check if the error was output\nfname = 'heat2d_error.txt'\n\nif os.path.isfile(fname):\n plottype = ['solution', 'error']\nelse:\n plottype = ['solution']\n\nfor pt in plottype:\n\n # fill array with data\n time = np.zeros(nt)\n result = np.zeros((nt, ny, nx))\n\n # load data\n data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double)\n\n # extract data\n for i in range(nt):\n time[i] = data[i,0]\n result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx))\n\n # determine extents of plots\n maxtemp = 1.1 * result.max()\n mintemp = 0.9 * result.min()\n\n # set x and y meshgrid objects\n xspan = np.linspace(0.0, xu, nx)\n yspan = np.linspace(0.0, yu, ny)\n X,Y = np.meshgrid(xspan, yspan)\n\n nxstr = repr(nx)\n nystr = repr(ny)\n\n # generate plots\n for tstep in range(nt):\n\n # set string constants for output plots, current time, mesh size\n pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png'\n tstr = str(time[tstep])\n\n # plot surface and save to disk\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n\n ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1,\n cmap=cm.jet, linewidth=0, antialiased=True, shade=True)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlim((mintemp, maxtemp))\n ax.view_init(20,45)\n if (pt == 'solution'):\n title('u(x,y) at t = ' + tstr)\n else:\n title('error(x,y) at t = ' + tstr)\n savefig(pname)\n plt.close()\n\n##### end of script #####\n"
] |
[
[
"numpy.linspace",
"numpy.reshape",
"matplotlib.pyplot.close",
"numpy.meshgrid",
"numpy.zeros",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
emreay-/cyclops
|
[
"72c4af694f51918eebea45a0fcd4c8c5ca3c19bd"
] |
[
"cyclops/utility.py"
] |
[
"#\n# Emre Ay, April 2017\n# Cyclops project: Localization system with an overhead camera\n#\n\nimport argparse\nfrom typing import Callable\n\nimport numpy as np\nimport cv2\nimport yaml\n\nfrom cyclops.scale_estimation import scale_estimation\n\n\nclass Keys:\n space = 32\n esc = 27\n enter = 10\n c = 99\n\n\nclass VideoCaptureInterface(object):\n\n def __init__(self, cam_src = 0, main_window_name: str = 'Main'):\n self.main_window = main_window_name\n self.roi = []\n self.quit = False\n self.is_done = False\n self.is_dragging = False\n self.is_cropped = False\n self.image = np.zeros((1,1,1), np.uint8)\n self.processed_image = np.zeros((1,1,1), np.uint8)\n self.video_capture = cv2.VideoCapture(cam_src)\n\n def destroy_windows(self):\n cv2.destroyWindow(self.main_window)\n\n def mouse_callback(self, event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN and not self.is_dragging:\n self.start_dragging(x, y)\n\n if event == cv2.EVENT_MOUSEMOVE and self.is_dragging:\n self.dragging_process(x, y)\n\n if event == cv2.EVENT_LBUTTONUP and self.is_dragging:\n self.stop_dragging(x, y)\n\n if event == cv2.EVENT_RBUTTONDOWN:\n self.right_click_process(x, y)\n \n def start_dragging(self, x, y):\n self.roi = [(x,y)]\n self.is_dragging = True\n\n def dragging_process(self, x, y):\n temp_image = self.processed_image.copy()\n cv2.rectangle(temp_image, self.roi[0], (x,y), (0, 255, 0), 2)\n cv2.imshow(self.main_window, temp_image)\n \n def stop_dragging(self, x, y):\n self.roi.append((x,y))\n temp_image = self.processed_image.copy()\n cv2.rectangle(temp_image, self.roi[0], (x,y), (0, 255, 0), 2)\n cv2.imshow(self.main_window, temp_image)\n self.is_dragging = False\n \n def right_click_process(self, x, y):\n self.roi = []\n self.is_dragging = False\n self.is_cropped = False\n\n def run_loop(self, key_processor: Callable):\n self.handle_main_window()\n\n while not self.is_done and not self.quit:\n if self.video_capture.isOpened():\n self.capture_image()\n self.process_image()\n key = self.wait_key()\n\n if self.is_region_selected():\n self.show_selected_region_on_processed_image()\n else:\n self.show_processed_image()\n \n key_processor(key)\n\n else:\n print('VideoCapture not opened')\n\n self.video_capture.release()\n\n def handle_main_window(self):\n self.create_main_window()\n self.set_mouse_callback()\n\n def create_main_window(self):\n cv2.namedWindow(self.main_window, cv2.WINDOW_AUTOSIZE)\n\n def set_mouse_callback(self):\n cv2.setMouseCallback(self.main_window, self.mouse_callback)\n\n def capture_image(self):\n _ , self.image = self.video_capture.read()\n\n def process_image(self):\n self.processed_image = self.image\n\n def wait_key(self):\n return cv2.waitKey(50)\n\n def is_region_selected(self):\n return len(self.roi) == 2\n\n def show_selected_region_on_processed_image(self):\n temp_image = self.processed_image.copy()\n cv2.rectangle(temp_image, self.roi[0], self.roi[1], (0, 0, 255), 2)\n cv2.imshow(self.main_window, temp_image)\n\n def show_processed_image(self):\n cv2.imshow(self.main_window, self.processed_image)\n\n def get_top_left_bottom_right_points_from_roi(self):\n (r1,r2),(r3,r4) = self.roi\n x1 = min(r1,r3)\n x2 = max(r1,r3)\n y1 = min(r2,r4)\n y2 = max(r2,r4)\n return (x1, y1), (x2, y2)\n \n @staticmethod\n def add_padding(image, padding=50):\n offset = int(padding / 2)\n image_with_padding = np.zeros((image.shape[0]+padding, image.shape[1]+padding, image.shape[2]), np.uint8)\n image_with_padding[offset:offset+image.shape[0],offset:offset+image.shape[1]] = image\n return 
image_with_padding\n\n def process_escape_key(self, key):\n if key == Keys.esc:\n self.destroy_windows()\n self.quit = True\n\n\nclass Scaler(VideoCaptureInterface):\n \n def __init__(self, cam_src = 0, param_src: str = None):\n super().__init__(cam_src, main_window_name = 'Scaler')\n self.undistort = False\n self.reference_width = 0.297\n self.cropped_window = 'Cropped'\n self.binary_window = 'Binary Image'\n self.result_window = 'Result'\n self.pixel_scale = None\n self.load_camera_parameters_if_given(param_src)\n\n def load_camera_parameters_if_given(self, param_src):\n if param_src:\n with open(param_src,'r') as stream:\n try:\n camera_info = yaml.load(stream)\n except yaml.YAMLError as exception:\n print(exception)\n if camera_info:\n self.undistort = True\n camera_matrix = camera_info['camera_matrix']['data']\n self.camera_matrix = np.reshape(np.array(camera_matrix),(3,3))\n distortion = camera_info['distortion_coefficients']['data']\n self.distortion = np.array(distortion)\n print('Camera calibration found. \\nCamera matrix:\\n{} \\nDistortion:\\n{}'.format(camera_matrix,distortion))\n\n def destroy_windows(self):\n super().destroy_windows()\n self.destroy_non_main_windows()\n\n def destroy_non_main_windows(self):\n cv2.destroyWindow(self.cropped_window)\n cv2.destroyWindow(self.binary_window)\n cv2.destroyWindow(self.result_window)\n\n def dragging_process(self, x, y):\n temp_image = self.processed_image.copy()\n temp_image = cv2.cvtColor(self.processed_image, cv2.COLOR_GRAY2RGB)\n cv2.rectangle(temp_image, self.roi[0], (x,y), (0, 255, 0), 2)\n cv2.imshow(self.main_window, temp_image)\n\n def stop_dragging(self, x, y):\n self.roi.append((x,y))\n temp_image = self.processed_image.copy()\n temp_image = cv2.cvtColor(self.processed_image, cv2.COLOR_GRAY2RGB)\n cv2.rectangle(temp_image, self.roi[0], (x,y), (0, 255, 0), 2)\n cv2.imshow(self.main_window, temp_image)\n self.is_dragging = False\n print('Selected region: {}'.format(self.roi))\n\n def right_click_process(self, x, y):\n super().right_click_process(x, y)\n self.destroy_non_main_windows()\n\n def process_image(self):\n if self.undistort:\n self.processed_image = cv2.undistort(self.image, self.camera_matrix, self.distortion)\n self.processed_image = cv2.cvtColor(self.processed_image, cv2.COLOR_BGR2GRAY)\n self.processed_image = cv2.GaussianBlur(self.processed_image, (7, 7), 0)\n\n def run(self):\n self.run_loop(self.key_processor)\n return self.pixel_scale\n\n def key_processor(self, key):\n if key == Keys.space or self.is_cropped:\n self.is_cropped = True\n if self.is_region_selected():\n (x1, y1), (x2, y2) = self.get_top_left_bottom_right_points_from_roi()\n self.cropped_image = self.processed_image[y1:y2, x1:x2]\n cv2.namedWindow(self.cropped_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.cropped_window, self.cropped_image)\n\n if key == Keys.c and self.is_cropped:\n threshold = int(np.mean(self.cropped_image))\n _, binary_image = cv2.threshold(self.cropped_image, threshold, 255, cv2.THRESH_BINARY)\n cv2.namedWindow(self.binary_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.binary_window, binary_image)\n self.estimate_scale(binary_image)\n\n if key == Keys.enter:\n if self.pixel_scale:\n self.destroy_windows()\n self.is_done = True\n else:\n print('Operation is not done yet.')\n\n self.process_escape_key(key)\n\n def estimate_scale(self, image):\n self.pixel_scale, color_image = scale_estimation(image, self.reference_width)\n cv2.namedWindow(self.result_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.result_window, 
color_image)\n\n\nclass MemberInitializer(VideoCaptureInterface):\n\n def __init__(self, main_window_name = 'Member Initializer', cam_src = 0):\n super().__init__(cam_src, main_window_name = main_window_name)\n self.cropped_window = 'Selected Area For Member Color'\n self.color_window = 'Selected Member Color'\n\n def destroy_windows(self):\n super().destroy_windows()\n self.destroy_non_main_windows()\n\n def destroy_non_main_windows(self):\n cv2.destroyWindow(self.cropped_window)\n cv2.destroyWindow(self.color_window)\n\n def right_click_process(self, x, y):\n self.destroy_non_main_windows()\n\n def get_member_color(self):\n self.run_loop(self.color_initialization_key_processor)\n return self.color\n\n def initialize_member_location(self):\n self.run_loop(self.location_initialization_key_processor)\n return self.init_location\n \n def color_initialization_key_processor(self, key):\n if key == Keys.space or self.is_cropped:\n self.is_cropped = True\n if len(self.roi) == 2:\n (x1, y1), (x2, y2) = self.get_top_left_bottom_right_points_from_roi()\n self.cropped_image = self.image[y1:y2, x1:x2]\n cv2.namedWindow(self.cropped_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.cropped_window, self.add_padding(self.cropped_image))\n\n if key == Keys.c and self.is_cropped:\n mean_color = cv2.mean(self.cropped_image)\n mean_color_image = self.cropped_image\n self.color = (mean_color[0], mean_color[1], mean_color[2])\n mean_color_image[:] = self.color\n cv2.namedWindow(self.color_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.color_window, self.add_padding(mean_color_image))\n print('Average BGR for member id: {}'.format(self.color))\n\n if key == Keys.enter:\n if self.color:\n self.destroy_windows()\n self.is_done = True\n else:\n print('Operation not done yet.')\n\n self.process_escape_key(key)\n\n def location_initialization_key_processor(self, key):\n if key == Keys.enter:\n if len(self.roi) == 2:\n (x1, y1), (x2, y2) = self.get_top_left_bottom_right_points_from_roi()\n self.init_location = ((x1 + x2) / 2.0, (y1 + y2) / 2.0) \n self.destroy_windows()\n self.is_done = True\n \n else:\n print('Operation not done yet.')\n\n self.process_escape_key(key)\n\n\nclass Member:\n members = int(0)\n\n def __init__(self):\n self._front_color = None\n self._rear_color = None\n self._init_location = None\n self._id = Member.members\n Member.members += 1\n\n def initialize_color(self):\n self._front_color = MemberInitializer(\n 'Select area for member front color').get_member_color()\n self._rear_color = MemberInitializer(\n 'Select area for member rear color').get_member_color()\n \n def initialize_location(self):\n self._init_location = MemberInitializer().initialize_member_location()\n\n @property\n def front_color(self):\n return self._front_color\n\n @property\n def rear_color(self):\n return self._rear_color\n\n @property\n def id(self):\n return self._id\n\n @property\n def initial_location(self):\n return self._init_location\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.mean"
]
] |
Wenlin-Chen/PILCO
|
[
"8a3a3eba26a3524beae86e050be108efb4f4113b"
] |
[
"pilco/models/smgpr.py"
] |
[
"import gpflow\nimport tensorflow as tf\nimport numpy as np\n\nfrom .mgpr import MGPR\n\nfrom gpflow import config\nfloat_type = config.default_float()\n\n\nclass SMGPR(MGPR):\n def __init__(self, data, num_induced_points, name=None):\n self.num_induced_points = num_induced_points\n MGPR.__init__(self, data, name)\n\n def create_models(self, data):\n self.models = []\n for i in range(self.num_outputs):\n kern = gpflow.kernels.SquaredExponential(lengthscales=tf.ones([data[0].shape[1],], dtype=float_type))\n Z = np.random.rand(self.num_induced_points, self.num_dims)\n #TODO: Maybe fix noise for better conditioning\n self.models.append(gpflow.models.GPRFITC((data[0], data[1][:, i:i+1]), kern, inducing_variable=Z))\n\n @tf.function\n def calculate_factorizations(self):\n batched_eye = tf.eye(self.num_induced_points, batch_shape=[self.num_outputs], dtype=float_type)\n # TODO: Change 1e-6 to the respective constant of GPflow\n Kmm = self.K(self.Z) + 1e-6 * batched_eye\n Kmn = self.K(self.Z, self.X)\n L = tf.linalg.cholesky(Kmm)\n V = tf.linalg.triangular_solve(L, Kmn)\n G = self.variance[:, None] - tf.reduce_sum(tf.square(V), axis=[1])\n G = tf.sqrt(1.0 + G/self.noise[:, None])\n V = V/G[:, None]\n Am = tf.linalg.cholesky(tf.matmul(V, V, transpose_b=True) + \\\n self.noise[:, None, None] * batched_eye)\n At = tf.matmul(L, Am)\n iAt = tf.linalg.triangular_solve(At, batched_eye)\n Y_ = tf.transpose(self.Y)[:, :, None]\n beta = tf.linalg.triangular_solve(L,\n tf.linalg.cholesky_solve(Am, (V/G[:, None]) @ Y_),\n adjoint=True\n )[:, :, 0]\n iB = tf.matmul(iAt, iAt, transpose_a=True) * self.noise[:, None, None]\n iK = tf.linalg.cholesky_solve(L, batched_eye) - iB\n return iK, beta\n\n @tf.function\n def centralized_input(self, m):\n return self.Z - m\n\n @property\n def Z(self):\n return self.models[0].inducing_variable.Z\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.transpose",
"tensorflow.linalg.cholesky_solve",
"tensorflow.linalg.triangular_solve",
"tensorflow.eye",
"tensorflow.ones",
"numpy.random.rand",
"tensorflow.square",
"tensorflow.sqrt",
"tensorflow.linalg.cholesky"
]
] |
campos20/wca-statistics
|
[
"ff7218038c526e8005435ad7ea762fdbc3eba9b0"
] |
[
"src/utils.py"
] |
[
"import json\nimport pandas as pd\nfrom math import *\nimport re\n\n# WCA_export_Competitions labels\n\"\"\"id name cityName countryId information [0-4]\nyear month day endMonth endDay [5-9]\neventSpecs wcaDelegate organiser [10-12]\nvenue venueAddress venueDetails [13-15]\nexternal_website cellName latitude longitude [16-19]\n\"\"\"\n\n# WCA_export_Results labels\n\"\"\"competitionId eventId roundTypeId pos [0-3]\nbest average personName personId personCountryId [4-8]\nformatId value1 value2 value3 value4 value5 [9-14]\nregionalSingleRecord regionalAverageRecord [15-16]\n\"\"\"\n# if ordered, then it's added\n# year month day [17-19]\n\n\ndef get_set_wca_events():\n \"\"\"Returns a set with all current WCA events as of\n https://www.worldcubeassociation.org/regulations/#9b\"\"\"\n\n return set(\"222 333 333bf 333fm 333ft 333mbf 333oh 444 444bf 555 555bf 666 777 clock minx pyram skewb sq1\".split())\n\n\ndef get_export_date():\n with open('WCA_export/metadata.json', 'r') as f:\n array = json.load(f)\n\n return array[\"export_date\"]\n\n\ndef html_link_format(text, link):\n return '<a href=\"%s\">%s</a>' % (link, text)\n\n\ndef reduce_to_letters(s):\n out = \"\"\n for x in s:\n if x.isalpha():\n out += x\n return out\n\n\ndef avg(l):\n if len(l) == 0:\n return 0.\n return 1.0*sum(l)/len(l)\n\n\ndef parse_link(link):\n \"\"\"Get link's text only\"\"\"\n if \"<a href\" in link:\n return link[link.index(\">\")+1: link.index(\"</a>\")]\n return link\n\n\ndef get_competitor_link(wca_id):\n return \"https://www.worldcubeassociation.org/persons/%s\" % wca_id\n\n\ndef get_competition_html_link(competition_id):\n link = \"https://www.worldcubeassociation.org/competitions/%s\" % competition_id\n return html_link_format(competition_id, link)\n\n\ndef largest_range(lista):\n\n # LISTA MUST HAVE NO REPETITIONS AND IT MUST BE SORTED\n\n i = 0\n r = 1\n max_r = 0\n min_range = -1 # where the range started\n max_range = -1 # where the range ended\n STEP = 1 # if you want ranges in 2 (eg. 
4, 6, 8), change here\n\n range_start = lista[i]\n range_end = lista[i]\n\n while i < len(lista)-1:\n\n if lista[i+1]-lista[i] == STEP:\n r += 1\n else:\n if r >= max_r:\n max_r = r\n max_range = lista[i]\n min_range = range_start\n\n range_start = lista[i+1]\n r = 1\n\n i += 1\n\n if r > max_r:\n max_r = r\n max_range = lista[i]\n min_range = range_start\n\n # len of range, start, end\n return (max_r, min_range, max_range)\n\n\ndef time_format(time):\n time = float(time)/100\n\n h = int(time/3600)\n time -= h*3600\n\n m = int(time/60)\n time -= m*60\n\n s = int(time)\n time -= s\n\n d = int(time*100)\n\n if h > 0:\n return \"%s:%s:%s.%s\" % (h, str(m).zfill(2), str(s).zfill(2), str(d).zfill(2))\n return \"%s:%s.%s\" % (str(m).zfill(2), str(s).zfill(2), str(d).zfill(2))\n\n\ndef get_competition_index_in_tsv(competition_id):\n # This does not consider the header, so, if it's not using pandas, better be sure if there's need to fix by 1.\n # Since competitions might have names capitalized or not, this messes with standard bisect.\n # So here is a new bisect, ignoring capitalization.\n\n data = (pd.read_csv(\n 'WCA_export/WCA_export_Competitions.tsv', sep='\\t'))[\"id\"]\n\n id_upper = competition_id.upper()\n\n start = 0\n end = len(data)\n i = (start+end)/2\n\n while start < end:\n i = (start+end)/2\n if data[i].upper() < x:\n start = i+1\n else:\n end = i\n\n return i\n\n\ndef dist(lat1, lon1, lat2, lon2):\n # https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude\n\n R = 6373.0\n\n lat1 = radians(float(lat1)/pow(10, 6))\n lon1 = radians(float(lon1)/pow(10, 6))\n lat2 = radians(float(lat2)/pow(10, 6))\n lon2 = radians(float(lon2)/pow(10, 6))\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n return R*c\n\n\ndef find_continent(countryId):\n if countryId == \"United States\":\n return \"North America\"\n\n data = pd.read_csv(\"WCA_export/WCA_export_Countries.tsv\", sep=\"\\t\")\n continentId = data[data[\"id\"] == countryId][\"continentId\"].to_string(\n index=False).strip()\n\n data = pd.read_csv(\"WCA_export/WCA_export_Continents.tsv\", sep=\"\\t\")\n continent = data[data[\"id\"] == continentId][\"name\"].to_string(\n index=False).strip()\n\n return continent\n\n\ndef find_name(countryId):\n \"\"\"From XE to multiple countries europe, for example.\"\"\"\n data = pd.read_csv(\"WCA_export/WCA_export_Countries.tsv\", sep=\"\\t\")\n name = data[data[\"id\"] == countryId][\"name\"].to_string(index=False).strip()\n return name\n\n\ndef iso2_country_name(countryId):\n country_list = pd.read_csv(\"WCA_export/WCA_export_Countries.tsv\", sep='\\t')\n countryId = countryId.strip()\n return country_list[country_list[\"iso2\"] == countryId][\"name\"].values[-1]\n\n\ndef extract_delegate(line):\n \"\"\"Delegates are written on a strange way on the export.\n This function extracts all of them.\n Example:\n [{Marlon de V. Marques}{mailto:[email protected]}] [{Murillo Gomes Otero}{mailto:[email protected]}] [{Pedro Santos Guimarães}{mailto:[email protected]}]\"\"\"\n out = []\n for x in re.findall(\"\\[(.*?)\\]\", line):\n delegate = re.findall(\"\\{(.*?)\\}\", x)\n delegate_name = delegate[0]\n out.append(delegate_name)\n return out\n\n\ndef get_delegates_list():\n with open('temp/delegates.json') as delegates_json:\n return json.load(delegates_json)\n raise \"Error loading delegates json\"\n"
] |
[
[
"pandas.read_csv"
]
] |
nilsgawlik/gdmc_2021_submission
|
[
"79b14af744f43b7af190f18dd5fa8dcdf5db10e3"
] |
[
"wfc_test.py"
] |
[
"from ast import walk\nimport cv2\nimport numpy as np\nimport time\nimport wfc_implementation\nimport mapUtils\nimport interfaceUtils\nfrom worldLoader import WorldSlice\nimport math\n\ntiles = np.array([\n [[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2]],[[2,2,1,0,0],[2,2,1,0,0],[1,1,1,0,0],[0,0,0,0,0],[0,0,0,0,0]],[[0,0,0,0,0],[0,0,0,0,0],[1,1,1,1,1],[2,2,2,2,2],[2,2,2,2,2]],[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]],[[0,0,1,2,2],[0,0,1,2,2],[1,1,1,2,2],[2,2,2,2,2],[2,2,2,2,2]],[[0,1,1,1,0],[1,1,1,1,0],[1,1,1,1,1],[1,1,1,1,2],[0,0,1,2,2]],[[0,1,1,1,0],[1,1,1,1,0],[1,1,1,1,0],[1,1,1,0,0],[0,0,0,0,0]],[[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[1,1,1,1,1],[2,2,2,2,2]],[[2,2,1,0,0],[2,1,1,0,0],[1,1,0,0,0],[0,0,0,0,0],[0,0,0,0,0]],[[0,0,1,2,2],[0,1,1,2,2],[1,1,2,2,2],[2,2,2,2,2],[2,2,2,2,2]],[[0,0,1,2,2],[0,0,1,2,2],[0,0,1,1,1],[0,0,1,2,2],[0,0,1,2,2]],[[0,0,1,2,2],[1,1,1,2,2],[1,1,1,1,1],[1,1,1,2,2],[0,0,1,2,2]],[[0,0,1,2,2],[0,0,1,2,2],[0,0,1,2,2],[0,0,1,2,2],[0,0,1,2,2]],[[2,2,2,2,2],[2,2,2,2,2],[1,1,1,1,1],[2,2,2,2,2],[2,2,2,2,2]],[[0,0,1,2,2],[1,1,1,2,2],[1,2,2,2,2],[1,1,1,2,2],[0,0,1,2,2]],[[2,2,1,2,2],[2,2,1,2,2],[1,1,1,1,1],[2,2,2,2,2],[2,2,2,2,2]],[[0,1,1,1,0],[0,1,2,1,0],[0,1,2,1,0],[0,1,2,1,0],[0,1,1,1,0]],[[0,0,1,2,2],[0,0,1,2,2],[1,1,1,1,1],[2,2,2,2,2],[2,2,2,2,2]],[[2,2,1,0,0],[2,2,1,0,0],[1,1,1,1,1],[2,2,2,2,2],[2,2,2,2,2]],[[0,0,1,2,2],[0,1,1,1,2],[1,1,1,1,1],[2,1,1,1,0],[2,2,1,0,0]]])\n\n\ntiles = wfc_implementation.expandRotations(tiles)\n\n# w = h = 15\nw = h = 5\nlayers = 3\narea = (700, -750, w*5*3, h*5*3)\nbuildArea = interfaceUtils.requestBuildArea()\nif buildArea != -1:\n x1 = buildArea[\"xFrom\"]\n z1 = buildArea[\"zFrom\"]\n x2 = buildArea[\"xTo\"]\n z2 = buildArea[\"zTo\"]\n # print(buildArea)\n area = (x1, z1, int((x2-x1)/15)*15, int((z2-z1)/15)*15)\nprint(\"Build area is at position %s, %s with size %s, %s\" % area)\n\nworldSlice = WorldSlice(area)\nstrctElmt3x3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n# crossKernel3x3 = np.array([[0,1,0],[1,1,1],[0,1,0])\nkernel1x3 = np.array([[1,1,1]])\nkernel3x1 = np.transpose(kernel1x3)\nkernel3x3 = kernel3x1 * kernel1x3\nheightmap = np.array(worldSlice.heightmaps[\"WORLD_SURFACE\"], dtype = np.uint8)\n# heightmap = mapUtils.fractalnoise((area[2], area[3]))\n\nfloorWallMappings = [\n# [f,w,c,s] (floor, wall, ceiling, space)\n [1,1,1], # 0: wall\n [1,0,0], # 1: walkway\n [0,0,0], # 2: void\n [1,0,1], # 3: floor/ceiling\n [1,1,1] # 4: seawall\n]\n\nabsoluteFloor = 60\n\nfor i in range(layers):\n image = wfc_implementation.runWFC(tiles, w, h, 2)\n image = image.astype('uint8')\n image = cv2.resize(image, (area[2],area[3]), interpolation=cv2.INTER_NEAREST)\n\n buildings = (image == 0).astype(np.uint8)\n void = (image == 2).astype(np.uint8)\n railings = void - cv2.erode(void, strctElmt3x3)\n hrailings = (cv2.filter2D(railings, -1, kernel1x3) == 3).astype(np.uint8)\n vrailings = (cv2.filter2D(railings, -1, kernel3x1) == 3).astype(np.uint8)\n crailings = railings - hrailings - vrailings\n buildingsdilated = cv2.dilate(buildings, strctElmt3x3)\n walkwdeco = cv2.dilate(buildings, strctElmt3x3) - buildings\n walkwdx = (cv2.filter2D(buildings, cv2.CV_16S, np.array([[-1,0,1]])) * walkwdeco + 1) # 0,1,2\n walkwdy = (cv2.filter2D(buildings, cv2.CV_16S, np.array([[-1],[0],[1]])) * walkwdeco + 1) # # 0,1,2\n walkwMap = walkwdx * 3 + walkwdy\n walkwoc = (cv2.filter2D(buildingsdilated, -1, kernel3x3) == 4) * walkwdeco\n walkwoc = (cv2.dilate(walkwoc, strctElmt3x3) - walkwoc) * walkwdeco\n # walls: 1,0 1,2 
0,1 2,1 = 3, 5, 1, 7\n # neutral: 4\n # inner corners: 0,0 0,2 2,0 2,2 = 0, 2, 6, 8\n\n # make it so that walkWMap is '10' at outer corners:\n walkwMap = np.where(walkwoc, 10, walkwMap)\n\n # mapUtils.visualize(walkwMap, walkwoc)\n\n image = image + cv2.erode(buildings, strctElmt3x3) * 3 # basically insides of buildings (0) to floor/ceiling (3)\n cv2.rectangle(image, (0,0), (image.shape[0]-1, image.shape[1]-1), (4), 1) # build seawall\n\n # mapUtils.visualize(image, railings, vrailings, hrailings, crailings)\n # mapUtils.visualize(image)\n\n startHeights = [\n absoluteFloor - 20 if i == 0 else absoluteFloor + 12 * i,\n absoluteFloor + 12 * i + 1,\n absoluteFloor + 12 * i + 12,\n absoluteFloor + 12 * (i) + 13\n ] # [floor, wall, ceiling, space, next level]\n\n # cardinals = [\"east\", \"south\", \"north\", \"west\"]\n cardinals = [\"north\", \"west\", \"east\", \"south\"]\n protectedBlocks = [\"minecraft:blackstone\", \"minecraft:gray_concrete\"]\n\n # do construction\n for x in range(area[2]):\n for z in range(area[3]):\n yTerrain = int(heightmap[(x,z)])\n buildType = int(image[(x,z)])\n\n for j in range(len(startHeights) - 1):\n buildingBlock = \"air\" if floorWallMappings[buildType][j] == 0 else \"gray_concrete\" if buildType == 0 or buildType == 3 else \"blackstone\"\n y1 = startHeights[j]\n y2 = startHeights[j + 1]\n if buildType == 4:\n y2 = min(y2, yTerrain)\n for y in range(y1, y2):\n if not worldSlice.getBlockAt((area[0] + x, y, area[1] + z)) in protectedBlocks:\n interfaceUtils.placeBlockBatched(area[0] + x, y, area[1] + z, buildingBlock, 1000)\n \n # railings\n ry = startHeights[1]\n if hrailings[(x,z)]:\n interfaceUtils.placeBlockBatched(area[0] + x, ry, area[1] + z, \"end_rod[facing=north]\", 1000)\n elif vrailings[(x,z)]:\n interfaceUtils.placeBlockBatched(area[0] + x, ry, area[1] + z, \"end_rod[facing=east]\", 1000)\n elif crailings[(x,z)]:\n for i in range(2):\n interfaceUtils.placeBlockBatched(area[0] + x, ry - 1 + i, area[1] + z, \"gray_concrete\", 1000)\n # walkway decoration\n wmapVal = int(walkwMap[(x,z)])\n if wmapVal % 2 == 1:\n # wall adjacent\n dir = int((wmapVal - 1) / 2)\n cdir1 = cardinals[dir]\n cdir2 = cardinals[(dir + 2) % 4]\n interfaceUtils.placeBlockBatched(area[0] + x, ry, area[1] + z, \"polished_blackstone_stairs[facing=%s]\" % cdir1, 1000)\n interfaceUtils.placeBlockBatched(area[0] + x, ry+1, area[1] + z, \"end_rod[facing=%s]\" % cdir2, 1000)\n interfaceUtils.placeBlockBatched(area[0] + x, ry+2, area[1] + z, \"polished_blackstone_slab\", 1000)\n elif wmapVal % 2 == 0 and wmapVal != 4:\n # inner corner\n for i in range(3):\n interfaceUtils.placeBlockBatched(area[0] + x, ry + i, area[1] + z, \"polished_blackstone\", 1000)\n\n interfaceUtils.sendBlocks() # send remaining blocks in buffer"
] |
[
[
"numpy.array",
"numpy.where",
"numpy.transpose"
]
] |
xiemenghua/Q-learing--robort-model
|
[
"68b7415d28ce83c19788f3167456c317db553d4b"
] |
[
"Q-learing-robot-5.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\nfrom itertools import product as selfcrossjoin\nimport numpy as np\nQtable = {}\n\n\n#分两次更新Q表,当落子没有占满棋盘时,当落子占满棋盘\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n#利用itertools 笛卡尔生成器生成 Q表,并初始化Q值为全部0\n#因为9个位置,每个位置有3种可能组合0,1,2。期盘有9个格子 因此排列组合的可能性有 3^9=19683种组合\nx=0\nfor i in selfcrossjoin([0,1,2], repeat = 25):\n Qtable.update({i : 0})\n x=x+1\n#输出的初始Q表类似下面的样子:\n#25代表棋盘5*5=25 \n'''{(1, 2, 1, 2, 2, 2, 0, 1, 0): 0, (1, 2, 1, 1, 2, 0, 1, 2, 0): 0......}'''\nprint('Q表中的状态一共有',x)\n#print(Qtable)\n\n\n# In[ ]:\n\ndef get_R_and_Win(state): #返回R值以及判断胜利\n #print('get_R_and_Win传入的state为',state)\n board = [(0,1,2,3,4), (5,6,7,8,9), (10,11,12,13,14),(15,16,17,18,19), (20,21,22,23,24), (0,5,10,15,20),(1,6,11,16,21), \n (2,7,12,17,22),(3,8,13,18,23),(4,9,14,19,24),(0,6,12,18,24),(4,8,12,16,20)]\n for (p1, p2, p3,p4,p5) in board:\n if (state[p1] != 0):\n if (state[p1] == state[p2] == state[p3]==state[p4]==state[p5]):\n return 1\n return 0\n\n\n# In[ ]:\n\n# 返回一个随机位置,传入参数为期盘,Q表,和玩家\ndef get_a_move(board, P_Qtable, gamer): # return random position in space board\n #print('get_a_move传入的board为' ,board,'gamer是',gamer)\n stage_var = []\n for i in range(len(board)):\n #判断期盘是否有子,如果没有子就送入stage_var,作为候选位置\n if (board[i] == 0):\n stage_var.append(i)\n #print('get_a_move stage_var append 现在为:',stage_var)\n length = len(stage_var)\n returnpoistion=stage_var[np.random.randint(0, length)]\n #print('get_a_move返回的可用的候选落子位置为',returnpoistion)\n return (returnpoistion)\n\n\n# In[ ]:\n\n# 在当前状态下。 下一个状态的Q值\n\n##############################################\n\n# 完成返回Q值函数定义\n\n##############################################\n\ndef getnextQvalue(board, P_Qtable, gamer): #返回当前状态下 如果选择选择所有可能动作进入下一个可能所有状态,的所有Q值\n \n if (0 in board):\n \n Qvalue = []\n \n for i in range(len(board)):\n if (board[i]== 0):\n stage_var = board[:]\n stage_var[i] = gamer\n \n temp_tuple = tuple(stage_var)\n \n Qvalue.append(P_Qtable[temp_tuple])\n return Qvalue\n else :\n return [0]\n\n\n# In[1]:\n\n\ndef upd_pre_state_Q(length, Qtable,state,alpha=0.2,gamma=0.7):\n preState = state[:]\n\n while (length > 0):\n\n preState[p[length]] = 0\n\n \n \n Qtable[tuple(preState)] = Qtable[tuple(preState)] + alpha *(gamma * Qtable[tuple(state)] - Qtable[tuple(preState)])\n state = preState[:]\n\n length =length - 1\n\n return 0\n\n\n# In[2]:\n\ndef AI_Play_Move(board, P_Qtable): #计算最大Q值对应的位置\n temp_Q=-10000\n ai_move = 0\n \n for i in range(len(board)):\n stage_var = board[:] \n if (board[i] == 0):\n \n stage_var[i] = 1\n \n if (P_Qtable[tuple(stage_var)] > temp_Q):\n \n temp_Q = P_Qtable[tuple(stage_var)]\n ai_move = i\n #print('当前的i=',ai_move,'是Q最大作为落子点返回') \n return ai_move\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\ndef train(iteration=2000,alpha=0.2,gamma=0.7):\n episode = 0\n while (episode < iteration):\n #while (episode < 50):\n episode += 1\n\n state = [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n global p\n p = []\n turn = 1\n while ( 0 in state):\n position = get_a_move(state, Qtable, turn) # 返回期盘的随机位置\n \n p.append(position) \n \n state[position] = turn\n \n if(get_R_and_Win(state) == 1 ):\n if (turn == 1):\n Qtable[tuple(state)] = 1\n \n elif(turn == 2):\n Qtable[tuple(state)] = -1\n \n break\n elif(turn == 1):\n turn = 2\n\n else: #turn == 2\n turn = 1\n #'设置玩家=1再次计算更新Q值,下一个状态应该是1也就是AI落子,我们希望选择的动作具有最大的Q值')\n t = getnextQvalue(state,Qtable, turn) #q-value\n future_score = (gamma * max(t) - Qtable[tuple(state)]) #q-value\n Qtable[tuple(state)] = Qtable[tuple(state)] + alpha * future_score #q-value\n length = len(p) -1\n\n 
upd_pre_state_Q(length, Qtable, state,alpha,gamma)\n \ntrain()\n\n\n# In[ ]:\n\nboard = [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0]\ninstruct_board = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]\nprint('*********棋盘落子位置说明************')\n\nprint(instruct_board[0:4])\nprint(instruct_board[5:9])\nprint(instruct_board[10:14])\nprint(instruct_board[15:19])\nprint(instruct_board[20:24])\n\nprint('**************对局开始*****************')\n\nprint(board[0:4])\nprint(board[5:9])\nprint(board[10:14])\nprint(board[15:19])\nprint(board[20:24])\nwhile (0 in board): \n postion = AI_Play_Move(board, Qtable)\n board[postion] = 1#在选择的棋盘位置填入1\n print('AI落子现在棋盘为:')\n print(board[0:4])\n print(board[5:9])\n print(board[10:14])\n print(board[15:19])\n print(board[20:24])\n \n if (get_R_and_Win(board) == 1): #检查是否胜利\n print ('人工智能获得胜利')\n break\n userposition = int(input(\"输入你的位置\"))\n board[userposition] = 2 #在用户输入的位置填入2\n print(board[0:4])\n print(board[5:9])\n print(board[10:14])\n print(board[15:19])\n print(board[20:24])\n if (get_R_and_Win(board) == 1):\n print ('你获得了胜利')\n break\n\n\n# In[ ]:\n\n\n\n"
] |
[
[
"numpy.random.randint"
]
] |
emac1234/supportagent
|
[
"c84c1d4f8ac4f0fc73310331b9195b1ccbce89b3"
] |
[
"supportagent/utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Zendesk Generator.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1fqcKvmj376PXj1Yifu5dQ6gJq_hsqH9l\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# !pip install transformers\n# !pip install zenpy\n\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoModelForCausalLM, AutoTokenizer, BertForMaskedLM, \\\n BertTokenizer\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch\nimport requests\nfrom torch.utils.data import Dataset, DataLoader, IterableDataset\nimport re\nimport pandas as pd\nfrom collections import Counter\nfrom pprint import pprint\nfrom zenpy import Zenpy\nfrom zenpy.lib.exception import RecordNotFoundException\nimport datetime\nfrom zenpy.lib.api_objects import Comment, Ticket\nfrom random import random\nfrom pprint import pprint\n\nZENDESK_USER = os.environ['ZENDESK_USER']\nZENDESK_TOKEN = os.environ['ZENDESK_TOKEN']\n\nZENPY_CLIENT = Zenpy(subdomain='iliff', email=ZENDESK_USER,\n token=ZENDESK_TOKEN)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# model = model.to(device)\n\n# find a pretrained model for maskedtasks\ntest_model = BertForMaskedLM.from_pretrained('bert-large-uncased').to(device)\ntest_tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')\n\n\ndef get_metric(sentence):\n with torch.no_grad():\n tokens = test_tokenizer(sentence, return_tensors='pt').to(device)\n outputs = test_model(**tokens)\n\n softmax = torch.softmax(outputs.logits, dim=-1)[0]\n input_ids = tokens['input_ids'][0]\n probabilities = []\n for i,token in enumerate(input_ids):\n token_row = softmax[i]\n input_id = input_ids[i]\n token_probability = token_row[input_id]\n decoded = test_tokenizer.decode([input_id])\n probabilities.append((decoded, token_probability.item()))\n pprint(probabilities)\n return probabilities\n\n\ndef respond_to_ticket(ticket_id):\n ticket = ZENPY_CLIENT.tickets(id=ticket_id)\n\n user_comment = list(ZENPY_CLIENT.tickets.comments(ticket))[-1].plain_body\n with torch.no_grad():\n sentence = tokenizer(user_comment + ' <|endoftext|>', return_tensors='pt').to(device)\n outputs = model.generate(**sentence,\n max_length=500,\n min_length=10,\n do_sample=True,\n top_p=.8,\n top_k=3,\n num_return_sequences=1,\n repetition_penalty=1.2,\n temperature=0.7)\n output = outputs[0]\n decoded = tokenizer.decode(output)\n\n ticket_comment = decoded.split('<|endoftext|>')[1]\n get_metric(ticket_comment)\n ticket_comment = \\\n re.split(r'[\\n\\r]\\s*with\\s+respect,?\\s*[\\n\\r]|[\\n\\r]\\s*best,?\\s*[\\n\\r]|[\\n\\r]\\s*thanks,?\\s*[\\n\\r]', ticket_comment,\n flags=re.I)[0]\n ticket.comment = Comment(body=ticket_comment, public=False)\n ZENPY_CLIENT.tickets.update(ticket)\n\n\ndef needs_comment(ticket):\n user_comment = list(ZENPY_CLIENT.tickets.comments(ticket))[-1]\n if user_comment.author.email in ['[email protected]', '[email protected]', '[email protected]', '[email protected]']:\n return False\n return True\n\n\ndef get_new_tickets():\n new_tickets = ZENPY_CLIENT.search(type='ticket', status='new')\n return list(new_tickets)\n\n\n# respond_to_ticket(35362)\n\nif __name__ == \"__main__\":\n\n model = torch.load('model_1.3485576329479791.pt')\n tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-medium')\n\n model = model.to(device)\n\n respond_to_ticket(35362)"
] |
[
[
"torch.softmax",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] |
capslockwizard/drsip
|
[
"ad6a6fe185629a5f3801ce548f0adb86f1684877"
] |
[
"drsip/save_load.py"
] |
[
"\"\"\"\r\n=====================================================\r\nSaving and Loading (:mod:`drsip.save_load`)\r\n=====================================================\r\n\r\nModule contains helper functions to save and load DR-SIP data files.\r\n\r\nFunctions\r\n---------\r\n\r\n.. autofunction:: save_pd_table\r\n.. autofunction:: load_pd_table\r\n.. autofunction:: load_StrIO\r\n.. autofunction:: convert_StrIO_or_file_to_str\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport drsip_common\r\n\r\n\r\ndef save_pd_table(storage, table, var_name):\r\n \"\"\"Split and store the values and indices\r\n\r\n Parameters\r\n ----------\r\n dist_mat_1, dist_mat_2 : np.array\r\n NxN distance matrices between the 2 monomers in the docking\r\n pose. Where N are the number of atoms.\r\n\r\n Returns\r\n -------\r\n np.array\r\n Returns a new distance matrix containing the minimum values for\r\n each element.\r\n \"\"\"\r\n\r\n storage[var_name + '_val'] = table.values.tolist()\r\n storage[var_name + '_idx'] = table.index.tolist()\r\n\r\n if table.index.name is None:\r\n storage[var_name + '_col'] = table.reset_index().columns.tolist()\r\n\r\n else:\r\n storage[var_name + '_col'] = table.columns.tolist()\r\n\r\n\r\ndef load_pd_table(storage, var_name, pop=False):\r\n \"\"\"Reconstruct Pandas table from dictionary key-value pairs and save to storage\r\n\r\n Args:\r\n storage (dict): Dictionary to load the data from\r\n var_name (str): Original variable name\r\n pop (bool, optional): If True, remove key-value pairs used to\r\n reconstruct the table from storage\r\n \"\"\"\r\n\r\n if pop:\r\n value = storage.pop(var_name + '_val')\r\n index = storage.pop(var_name + '_idx')\r\n columns = storage.pop(var_name + '_col')\r\n\r\n else:\r\n value = storage[var_name + '_val']\r\n index = storage[var_name + '_idx']\r\n columns = storage[var_name + '_col']\r\n\r\n if len(value) != 0 and len(value[0]) != len(columns):\r\n storage[var_name] = pd.DataFrame(\r\n value, index=index, columns=columns[1:])\r\n storage[var_name].index.name = columns[0]\r\n\r\n else:\r\n storage[var_name] = pd.DataFrame(value, index=index, columns=columns)\r\n\r\n\r\ndef load_StrIO(storage, var_name):\r\n \"\"\"Convert strings in storage to StringIO and replace original string in\r\n storage\r\n\r\n Args:\r\n storage (dict): Dictionary to load and save the string/StringIO data\r\n var_name (str): Variable name\r\n \"\"\"\r\n\r\n file_str = storage.pop(var_name)\r\n\r\n storage[var_name] = drsip_common.convert_str_to_StrIO(file_str)\r\n\r\n\r\ndef convert_StrIO_or_file_to_str(input_obj):\r\n\r\n if isinstance(input_obj, basestring):\r\n input_str = drsip_common.convert_file_to_str(input_obj)\r\n\r\n else:\r\n input_str = drsip_common.convert_StrIO_to_str(input_obj)\r\n\r\n return input_str\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
Flamexmt/LMA
|
[
"bb500b7ae48a3f6751d6434126de9845b58d2d65"
] |
[
"datasets/MNIST.py"
] |
[
"import os\nimport torchvision\nimport torchvision.transforms as vision_transforms\nimport datasets\nimport torch\nimport datasets.torchvision_extension as vision_transforms_extension\n\n\nmeanstd = {\n 'mean':(0.1307,),\n 'std': (0.3081,),\n}\n\nclass MNIST(object):\n def __init__(self, dataFolder=None, pin_memory=False):\n\n self.dataFolder = dataFolder if dataFolder is not None else os.path.join(datasets.BASE_DATA_FOLDER, 'MNIST')\n self.pin_memory = pin_memory\n self.meanStd = meanstd\n\n #download the dataset\n torchvision.datasets.MNIST(self.dataFolder, download=True)\n\n def getTrainLoader(self, batch_size, shuffle=True, num_workers=1, checkFileIntegrity=False):\n\n #first we define the training transform we will apply to the dataset\n listOfTransoforms = []\n listOfTransoforms.append(vision_transforms.ToTensor())\n listOfTransoforms.append(vision_transforms.Normalize(mean=self.meanStd['mean'],\n std=self.meanStd['std']))\n train_transform = vision_transforms.Compose(listOfTransoforms)\n\n #define the trainset\n trainset = torchvision.datasets.MNIST(root=self.dataFolder, train=True,\n download=checkFileIntegrity, transform=train_transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=self.pin_memory)\n\n return trainloader\n\n def getTestLoader(self, batch_size, shuffle=True, num_workers=1, checkFileIntegrity=False):\n\n listOfTransoforms = [vision_transforms.ToTensor()]\n listOfTransoforms.append(vision_transforms.Normalize(mean=self.meanStd['mean'],\n std=self.meanStd['std']))\n\n test_transform = vision_transforms.Compose(listOfTransoforms)\n testset = torchvision.datasets.MNIST(root=self.dataFolder, train=False,\n download=checkFileIntegrity, transform=test_transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=self.pin_memory)\n\n return testloader"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
Jayram999/Alert-Generation-on-Detection-of-Suspicious-Activity-using-Transfer-Learning
|
[
"4c8a257d76d57fbfcb76758039fc393b7ad1f5d6"
] |
[
"videoClassification/predict_video_realtime.py"
] |
[
"#python predict_video_realtime.py --model model/activity_gpu.model --label-bin model/lb.pickle --output output/shoplifting.avi --size 64\n\n# import the necessary packages\nfrom keras.models import load_model\nfrom imutils.video import VideoStream\nfrom collections import deque\nimport numpy as np\nimport argparse\nimport time\nimport pickle\nimport cv2\nfrom twilio.rest import Client\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to trained serialized model\")\nap.add_argument(\"-l\", \"--label-bin\", required=True,\n\thelp=\"path to label binarizer\")\nap.add_argument(\"-i\", \"--input\", required=False,\n\thelp=\"path to our input video\")\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"path to our output video\")\nap.add_argument(\"-s\", \"--size\", type=int, default=128,\n\thelp=\"size of queue for averaging\")\nargs = vars(ap.parse_args())\n\n# load the trained model and label binarizer from disk\nprint(\"[INFO] loading model and label binarizer...\")\nmodel = load_model(args[\"model\"])\nlb = pickle.loads(open(args[\"label_bin\"], \"rb\").read())\n\n# initialize the image mean for mean subtraction along with the\n# predictions queue\nmean = np.array([123.68, 116.779, 103.939][::1], dtype=\"float32\")\nQ = deque(maxlen=args[\"size\"])\n\n# initialize the video stream, pointer to output video file, and\n#------------------# frame dimensions\n# initialize the video stream and pointer to output video file, then\n# allow the camera sensor to warm up\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\nwriter = None\ntime.sleep(2.0)\n#-------------------\n(W, H) = (None, None)\n#client = Client(\"XXXXXX\", \"XXXXXX\") # copy from twilio\nprelabel = ''\nprelabel = ''\nok = 'Normal'\nfi_label = []\nframecount = 0\n# loop over frames from the video file stream\nwhile True:\n\t# read the next frame from the file\n\tframe = vs.read()\n\n\t# if the frame was not grabbed, then we have reached the end\n\t# of the stream\n\n\t# if the frame dimensions are empty, grab them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# clone the output frame, then convert it from BGR to RGB\n\t# ordering, resize the frame to a fixed 224x224, and then\n\t# perform mean subtraction\n\toutput = frame.copy()\n\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\tframe = cv2.resize(frame, (224, 224)).astype(\"float32\")\n\tframe -= mean\n\n\t# make predictions on the frame and then update the predictions\n\t# queue\n\tpreds = model.predict(np.expand_dims(frame, axis=0))[0]\n\t#proba = model.predict(frame)[0]\n\t#print('new prob', proba)\n\tprediction = preds.argmax(axis=0)\n\tQ.append(preds)\n\n\t# perform prediction averaging over the current history of\n\t# previous predictions\n\tresults = np.array(Q).mean(axis=0)\n\tprint('Results = ', results)\n\tmaxprob = np.max(results)\n\tprint('Maximun Probability = ', maxprob)\n\ti = np.argmax(results)\n\tlabel = lb[i]\n#\tlabelnew = lb.classes_[i]\n\trest = 1 - maxprob\n\n\tdiff = (maxprob) - (rest)\n\tprint('Difference of prob ', diff)\n\tth = 100\n\tif diff > .80:\n\t\tth = diff\n\n\tif (preds[prediction]) < th:\n\t\ttext = \"Alert : {} - {:.2f}%\".format((ok), 100 - (maxprob * 100))\n\t\tcv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 255, 0), 5)\n\telse:\n\t\tfi_label = np.append(fi_label, label)\n\t\ttext = \"Alert : {} - {:.2f}%\".format((label), maxprob * 
100)\n\t\tcv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 255, 0), 5)\n#\t\tif label != prelabel: #update to get alert on your mobile number\n#\t\t\tclient.messages.create(to=\"countrycode and mobile number\", #for example +91XXXXXXXXXX\n# from_=\"Sender number from twilio\", #example +1808400XXXX\n# body='\\n'+ str(text) +'\\n Satellite: ' + str(camid) + '\\n Orbit: ' + location)\n\t\tprelabel = label\n\n\n# change the \"from_\" number to your Twilio number and the \"to\" number\n# to the phone number you signed up for Twilio with, or upgrade your\n# account to send SMS to any phone number\n\n\t# check if the video writer is None\n\tif writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n\t\t\t(W, H), True)\n\n\t# write the output frame to disk\n\twriter.write(output)\n\n\t# show the output image\n\tcv2.imshow(\"Output\", output)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# release the file pointers\nprint(\"[INFO] cleaning up...\")\nwriter.release()\nvs.release()\n"
] |
[
[
"numpy.expand_dims",
"numpy.max",
"numpy.append",
"numpy.argmax",
"numpy.array"
]
] |
1at7/Travelling-Thief-Problem
|
[
"e62335af29b7503808b69b57984f6758398d1150"
] |
[
"tsp.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\nimport random\r\nimport math\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\nfrom collections import deque\r\n\r\n\r\ndef decide_to(p):\r\n '''\r\n Randomly returns True/False based on probability.\r\n \r\n Args:\r\n p :float of [0,1]; probability of the return value being True\r\n Returns:\r\n True with the probability of p and False with the probability of (1-p)\r\n '''\r\n r = random.uniform(0, 1)\r\n return r <= p\r\n\r\n\r\ndef mutate(ind, N):\r\n '''\r\n Mutates the individual ind by swapping two genomes.\r\n Args:\r\n ind : |N| list/numpy.array of chromosome/individual\r\n N : length of ind \r\n Returns:\r\n None\r\n '''\r\n i = random.randrange(0, N)\r\n j = random.randrange(0, N)\r\n ind[i], ind[j] = ind[j], ind[i]\r\n return\r\n\r\n\r\ndef crossover(father, mother, child1, child2, N):\r\n '''\r\n Performs the Order 1 Crossover and produces two children by modifying child1 and child2.\r\n \r\n Args:\r\n father: |N| list/numpy.array representing the father chromosome\r\n mother: |N| list/numpy.array representing the mother chromosome\r\n child1: |N| list/numpy.array representing the first child\r\n child2: |N| list/numpy.array representing the second child\r\n N: length of all the input chromosomes\r\n Returns:\r\n None\r\n '''\r\n # randomly choosing the crossover range\r\n i = random.randrange(0, N)\r\n j = random.randrange(0, N)\r\n if i > j:\r\n i, j = j, i\r\n\r\n # keeping track of the exact intervals\r\n check1 = numpy.zeros(N)\r\n check2 = numpy.zeros(N)\r\n\r\n for x in range(i, j + 1):\r\n child1[x] = father[x] # Redundant because child1 is initialized as the father\r\n child2[x] = mother[x] # Redundant because child2 is initialized as the mother\r\n check1[father[x]] = 1\r\n check2[mother[x]] = 1\r\n\r\n # copying the remaining genomes sequentially for child1\r\n x = 0\r\n index = 0 + ((i == 0) * (j + 1))\r\n while x < N and index < N:\r\n if not check1[mother[x]]:\r\n child1[index] = mother[x]\r\n index += 1\r\n if index == i:\r\n index = j + 1\r\n x += 1\r\n # copying the remaining genomes sequentially for child2\r\n x = 0\r\n index = 0 + ((i == 0) * (j + 1))\r\n while x < N and index < N:\r\n if not check2[father[x]]:\r\n child2[index] = father[x]\r\n index += 1\r\n if index == i:\r\n index = j + 1\r\n x += 1\r\n return\r\n\r\n\r\ndef distance(points, order, N):\r\n '''\r\n Computes the euclidean distance of a cycle\r\n \r\n Args:\r\n points : |Nx2| list/numpy.array of coordinates of points\r\n order : |N| list/numpy.array of ordering of points (zero indexed)\r\n Returns:\r\n euclidean distance of points from point0 to point1 to ... 
pointN back to point0\r\n Examples:\r\n >>> distance([[0,0],[0,4],[3,4]],[0,1,2],3)\r\n 12.0\r\n '''\r\n x0 = points[0][0]\r\n y0 = points[0][1]\r\n xi = points[order[0]][0]\r\n yi = points[order[0]][1]\r\n s = math.sqrt((x0 - xi) ** 2 + (y0 - yi) ** 2)\r\n for i in range(1, N):\r\n x1 = points[order[i - 1]][0]\r\n y1 = points[order[i - 1]][1]\r\n x2 = points[order[i]][0]\r\n y2 = points[order[i]][1]\r\n s += round(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))\r\n xn = points[order[N-1]][0]\r\n yn = points[order[N-1]][1]\r\n s = s + math.sqrt((x0 - xn) ** 2 + (y0 - yn) ** 2)\r\n return s\r\n\r\n\r\ndef init_population(K, N):\r\n '''\r\n Initializes the population of K chromosomes with N genome for each chromosome by modifying pop.\r\n \r\n Args:\r\n K: number of chromosomes\r\n N: number of genomes in each chromosome\r\n Returns:\r\n |KxN| numpy.array of K chromosomes with N genome for each\r\n '''\r\n pop = numpy.zeros((K, N), dtype=numpy.int32)\r\n # each chromosome is a shuffle of sequence 1:N\r\n seq = list(range(N))\r\n for i in range(K):\r\n random.shuffle(seq)\r\n pop[i] = seq\r\n return pop\r\n\r\n\r\ndef compute_population_fitness(pop, points, K, N):\r\n '''\r\n Computes the fitness for each chromosome in the population by modifying fit.\r\n (Fitness of each chromosome is the negative of its cycle distance.)\r\n \r\n Args:\r\n pop: |KxN| list/numpy.array of K chromosomes with N genome for each\r\n points: |Nx2| list/numpy.array of coordinates of points\r\n K: number of chromosomes\r\n N: number of genomes in each chromosome\r\n Returns:\r\n fit, a |K| list/numpy.array of floats where fit[k] = fitness of pop[k].\r\n Examples:\r\n >>> compute_population_fitness([[0,3,1,2],[2,1,0,3]],[[0,0],[0,4],[3,4],[3,0]],2,4)\r\n array([-16., -14.])\r\n '''\r\n fit = numpy.zeros(K)\r\n for k in range(K):\r\n # fitness of each chromosome is the negative of its cycle distance\r\n fit[k] = -distance(points, pop[k], N)\r\n return fit\r\n\r\n\r\ndef find_cumulative_distribution(arr, K):\r\n '''\r\n Computes cumulative distribution (percentages) of arr.\r\n \r\n Args:\r\n arr: |K| numpy.array of numbers.\r\n K: length of arr\r\n Returns:\r\n cd, |K| numpy.array of floats containing the cumulative distributions\r\n where cd[i] is the probability that a uniform random number in [0,arr.sum()] is\r\n less than arr[:i].sum()\r\n Examples:\r\n >>> find_cumulative_distribution(numpy.array([4,2,2]),3)\r\n array([ 0.5 , 0.75, 1. 
])\r\n '''\r\n cd = numpy.zeros(K)\r\n acc = 0\r\n s = arr.sum()\r\n for i in range(K):\r\n acc += arr[i] / s\r\n cd[i] = acc\r\n return cd\r\n\r\n\r\ndef select_parent(fitness, K):\r\n '''\r\n Select and index for parent based on fitness of each chromosome using the roulette wheel technique.\r\n \r\n Args:\r\n fitness: |K| list/numpy.array of numbers representing the fitness for each chromosome\r\n K: length of fitness\r\n Returns:\r\n index of the randomly selected parent\r\n '''\r\n local_absolute_fitness = fitness - fitness.min() # now worst individual has fitness 0\r\n # implementation of roulette wheel technique for choosing a random number representing\r\n # the parent by using the cumulative probability of each element of the fitness list.\r\n cd = find_cumulative_distribution(local_absolute_fitness, K)\r\n roulette = random.uniform(0, 1)\r\n ind = 0\r\n while roulette > cd[ind]:\r\n ind += 1\r\n return ind\r\n\r\n\r\ndef create_new_population(pop, fitness, K, N, crossover_probability, mutation_probability):\r\n '''\r\n Creates a new population of K chromosomes of N genomes\r\n by crossovers and mutations over the current population.\r\n \r\n Args:\r\n pop: |KxN| list/numpy.array of K chromosomes with N genome for each chromosome\r\n representing the current population\r\n fitness: |K| list/numpy.array of fitness of each chromosome in pop\r\n K: number of chromosomes\r\n N: number of genomes in each chromosome\r\n crossover_probability: float in [0,1] representing crossover probability\r\n mutation_probability: float in [0,1] representing mutation probability\r\n Returns:\r\n |KxN| list/numpy.array of K chromosomes with N genome for each chromosome\r\n representing the new population\r\n '''\r\n new_pop = numpy.zeros((K, N), dtype=numpy.int32)\r\n for k in range(K // 2): # 2 children are created in each iteration\r\n father_ind = select_parent(fitness, K)\r\n mother_ind = select_parent(fitness, K)\r\n\r\n father = pop[father_ind]\r\n mother = pop[mother_ind]\r\n child1 = father.copy()\r\n child2 = mother.copy()\r\n\r\n if decide_to(crossover_probability):\r\n crossover(father, mother, child1, child2, N)\r\n if decide_to(mutation_probability):\r\n mutate(child1, N)\r\n if decide_to(mutation_probability):\r\n mutate(child2, N)\r\n\r\n new_pop[k * 2] = child1\r\n new_pop[k * 2 + 1] = child2\r\n return new_pop\r\n\r\n\r\ndef find_best_individual(pop, fitness, best_individual, best_fit):\r\n '''\r\n Finds the best individual and the fitness of that individual in all the generations.\r\n \r\n Args:\r\n pop: |KxN| list/numpy.array of K chromosomes with N genome for each\r\n fitness: |K| list/numpy.array of numbers representing the fitness for each chromosome\r\n best_individual: |N| list/numpy.array representing the best individual/chromosome\r\n so far excluding the current population.\r\n best_fit: number representing the fitness of best_individual\r\n Returns:\r\n {best individual so far},{fitness of best individual so far}\r\n '''\r\n current_best_index = fitness.argmax()\r\n current_best_fit = fitness[current_best_index]\r\n current_best_individual = pop[current_best_index]\r\n\r\n if best_fit < current_best_fit:\r\n return current_best_individual, current_best_fit\r\n else:\r\n return best_individual, best_fit\r\n\r\n\r\ndef read_input(path, N):\r\n '''\r\n Reads the first N lines of the .txt file denoted by path\r\n containing the coordinates of the points in the following format:\r\n x_1 y_1\r\n x_2 y_2\r\n ...\r\n \r\n Args:\r\n path: string that indicates the path to the text 
file containing coordinates of n points\r\n Returns:\r\n |Nx2| numpy.array of coordinates of points\r\n '''\r\n points = numpy.zeros((N, 2))\r\n file = open(path)\r\n lines = file.readlines()\r\n lines = [x.replace(',',' ') for x in lines]\r\n file.close()\r\n for i in range(N):\r\n points[i][0], points[i][1] = map(int, lines[i].split())\r\n return points\r\n\r\n\r\ndef plot_individual_path(individual, points, title, index):\r\n '''\r\n Plots individual cycle in the index of a 3x5 plot\r\n \r\n Args:\r\n individual: |N| list/numpy.array of a chromosome\r\n points: |Nx2| list/numpy.array of coordinates of points\r\n title: title of the plot\r\n index: integer in [1,15] denoting the position of the plot in a 3x5 matplotlib subplots.\r\n Returns:\r\n None\r\n '''\r\n x = []\r\n y = []\r\n for i in individual:\r\n x.append(points[i][0])\r\n y.append(points[i][1])\r\n x.append(x[0])\r\n y.append(y[0])\r\n\r\n plt.subplot(3, 5, index)\r\n plt.title(title)\r\n plt.xticks([], [])\r\n plt.yticks([], [])\r\n plt.plot(x, y, 'r*')\r\n plt.plot(x, y, 'g--')\r\n return\r\n\r\n\r\ndef plot_results(best_last_15, points):\r\n '''\r\n Plots and displays the best last 15 chromosomes generated through the generations.\r\n \r\n Args:\r\n best_last_15: |M| deque/list of (A,B) where A is one of the best chromosomes and B is its cycle distance\r\n and M<=15\r\n points: |Nx2| list/numpy.array of coordinates of points\r\n Returns:\r\n None\r\n '''\r\n for i in range(0,len(best_last_15)):\r\n plot_individual_path(best_last_15[i][0], points, str(round(best_last_15[i][1], 2)), i+1)\r\n plt.show()\r\n return\r\n\r\n\r\ndef TSP_genetic(n, k, max_generation, crossover_probability, mutation_probability, path):\r\n '''\r\n Solves the Traveling Sales Person Problem using genetic algorithm with chromosomes decoded\r\n as cycles (solutions) of traveling Order 1 crossover, Swap mutation, complete generation \r\n replacement, Roulette Wheel Technique for choosing parents and negative of cycle distance for fitness.\r\n \r\n \r\n Args:\r\n n: integer denoting the number of points and also the number of genome of each chromosom \r\n k: integer denoting the number of chromosomes in each population\r\n max_generation: integer denoting the maximum generation (iterations) of the algorithm\r\n crossover_probability: float in [0,1] denoting the crossover probability\r\n mutation_probability: float in [0,1] denoting the mutation probability\r\n path: string that indicates the path to the text file containing coordinates of n points\r\n Returns:\r\n None\r\n '''\r\n points = read_input(path, n)\r\n population = init_population(k, n)\r\n best_individual = population[0] # arbitrary choose a chromosome for initialization of best individual.\r\n best_fitness = -distance(points, best_individual, n) # setting -distance as fitness for best individual.\r\n old_best_fitness = best_fitness\r\n best_last_15 = deque([], maxlen=15) # queue with fixed size of 15\r\n\r\n for generation in range(1, max_generation + 1):\r\n # 1. We compute the fitness of each individual in current population.\r\n fitness = compute_population_fitness(population, points, k, n)\r\n # 2. We obtain the best individual so far together with its fitness.\r\n best_individual, best_fitness = find_best_individual(population, fitness, best_individual, best_fitness)\r\n # 3. We save the best last 15 individuals for plotting\r\n if old_best_fitness != best_fitness:\r\n old_best_fitness = best_fitness\r\n best_last_15.append((best_individual.copy(), -best_fitness))\r\n # 4. 
We create the next generation\r\n population = create_new_population(population, fitness, k, n, crossover_probability, mutation_probability)\r\n # 5. Prints best distance so far\r\n print(\"Generation = \", generation,'\\t',\"Path length = \",-best_fitness)\r\n\r\n solution = best_individual\r\n cycle_distance = -best_fitness\r\n\r\n\r\n #print(cycle_distance)\r\n #print(solution+1)\r\n #plot_results(best_last_15, points)\r\n return solution+1\r\n\r\n\r\n# General Parameters\r\n#n = 279\r\n#k = 120\r\n#max_generation = 500\r\n#crossover_probability = 0.99\r\n#mutation_probability = 0.01\r\n\r\n#path = 'nodes.txt'\r\n#TSP_genetic(n, k, max_generation, crossover_probability, mutation_probability, path)\r\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.xticks"
]
] |
zhu2014yi/CRPN
|
[
"5da5bf42eb8c86a17bdff52680c3827a2ef18590"
] |
[
"siamese_tracking/loss.py"
] |
[
"\nimport torch\nimport torch.nn as nn\n\n\n\ndef giou_bbox_overlaps(bboxes1, bboxes2, is_aligned=False):\n '''Calculate generative overlap between two set of bboxes.\n\n If ``is_aligned`` is ``False``, then calculate the ious between each bbox\n of bboxes1 and bboxes2, otherwise the ious between each aligned pair of\n bboxes1 and bboxes2.\n\n Args:\n bboxes1 (Tensor): shape (m, 4)\n bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n\n must be equal.\n\n Returns:\n gious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)\n '''\n\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n if is_aligned:\n assert rows == cols\n if rows * cols == 0:\n return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)\n\n if is_aligned:\n overlaps_lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]\n overlaps_rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]\n overlaps_wh = (overlaps_rb - overlaps_lt ).clamp(min=0) # [rows, 2]\n\n closure_lt = torch.min(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]\n closure_rb = torch.max(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]\n\n closure_wh = (closure_rb - closure_lt).clamp(min=0) # [rows, 2]\n overlap = overlaps_wh[:, 0] * overlaps_wh[:, 1]\n closure = closure_wh[:, 0] * closure_wh[:, 1]\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] ) * (\n bboxes1[:, 3] - bboxes1[:, 1] )\n\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] ) * (\n bboxes2[:, 3] - bboxes2[:, 1] )\n union = (area1 + area2 - overlap)\n ious = overlap / union\n gious = ious - (closure-union)/closure\n else:\n raise NotImplementedError\n\n\n return gious\n\ndef giou_loss(pred, target, eps=1e-6):\n \"\"\"IoU loss.\n\n Computing the IoU loss between a set of predicted bboxes and target bboxes.\n The loss is calculated as negative log of IoU.\n\n Args:\n pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n shape (n, 4).\n target (Tensor): Corresponding gt bboxes, shape (n, 4).\n eps (float): Eps to avoid log(0).\n\n Return:\n Tensor: Loss tensor.\n \"\"\"\n gious = giou_bbox_overlaps(pred, target, is_aligned=True)#.clamp(min=eps)\n loss = 1.-gious\n return loss\n\n\nclass GIoULoss(nn.Module):\n\n def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):\n super(GIoULoss, self).__init__()\n self.eps = eps\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n if weight is not None and not torch.any(weight > 0):\n return (pred * weight).sum() # 0\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss = self.loss_weight * giou_loss(\n pred,\n target,\n weight,\n eps=self.eps,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n return loss\n\n\n\n\n"
] |
[
[
"torch.any",
"torch.min",
"torch.max"
]
] |
TakuTsuzuki/Growing-Neural-Cellular-Automata
|
[
"baa475842a72a2f318e76b5e52d738d8911b069d"
] |
[
"lib/CAModel.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nclass CAModel(nn.Module):\n # Network structure\n def __init__(self, channel_n, fire_rate, device, hidden_size=128):\n super(CAModel, self).__init__()\n\n self.device = device \n self.channel_n = channel_n\n\n self.fc0 = nn.Linear(channel_n*3, hidden_size)\n self.fc1 = nn.Linear(hidden_size, channel_n, bias=False)\n # Initializarion\n with torch.no_grad():\n self.fc1.weight.zero_()\n\n self.fire_rate = fire_rate\n self.to(self.device)\n\n # alpha channel: x: alpha values including neighbor cells \n def alive(self, x):\n return F.max_pool2d(x[:, 3:4, :, :], kernel_size=3, stride=1, padding=1) > 0.1\n \n # angle: angle of a target pattern. Usually 0.0\n def perceive(self, x, angle):\n\n def _perceive_with(x, weight):\n conv_weights = torch.from_numpy(weight.astype(np.float32)).to(self.device)\n conv_weights = conv_weights.view(1,1,3,3).repeat(self.channel_n, 1, 1, 1)\n return F.conv2d(x, conv_weights, padding=1, groups=self.channel_n)\n\n dx = np.outer([1, 2, 1], [-1, 0, 1]) / 8.0 # Sobel filter\n dy = dx.T\n c = np.cos(angle*np.pi/180)\n s = np.sin(angle*np.pi/180)\n w1 = c*dx-s*dy\n w2 = s*dx+c*dy\n\n y1 = _perceive_with(x, w1)\n y2 = _perceive_with(x, w2)\n y = torch.cat((x,y1,y2),1)\n return y\n\n def update(self, x, fire_rate, angle):\n x = x.transpose(1,3)\n pre_life_mask = self.alive(x)\n\n dx = self.perceive(x, angle)\n dx = dx.transpose(1,3)\n dx = self.fc0(dx)\n dx = F.relu(dx)\n dx = self.fc1(dx)\n\n if fire_rate is None:\n fire_rate=self.fire_rate\n stochastic = torch.rand([dx.size(0),dx.size(1),dx.size(2),1])>fire_rate\n stochastic = stochastic.float().to(self.device)\n dx = dx * stochastic\n\n x = x+dx.transpose(1,3)\n\n post_life_mask = self.alive(x)\n life_mask = (pre_life_mask & post_life_mask).float()\n x = x * life_mask\n return x.transpose(1,3)\n\n def forward(self, x, steps=1, fire_rate=None, angle=0.0):\n for step in range(steps):\n x = self.update(x, fire_rate, angle)\n return x\n"
] |
[
[
"torch.cat",
"torch.nn.functional.conv2d",
"numpy.cos",
"numpy.sin",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.no_grad",
"numpy.outer",
"torch.nn.functional.max_pool2d"
]
] |
eltociear/chaos_genius
|
[
"eb3bc27181c8af4144b95e685386814109173164",
"eb3bc27181c8af4144b95e685386814109173164"
] |
[
"chaos_genius/core/rca/root_cause_analysis.py",
"chaos_genius/alerts/base_alerts.py"
] |
[
"\"\"\"Provides RootCauseAnalysis class for computing RCA.\"\"\"\n\nimport warnings\nfrom itertools import combinations\nfrom math import isclose\nfrom textwrap import wrap\nfrom typing import Dict, List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY\nfrom chaos_genius.core.rca.rca_utils.string_helpers import (\n convert_df_dims_to_query_strings,\n convert_query_string_to_user_string,\n)\nfrom chaos_genius.core.rca.rca_utils.waterfall_utils import (\n get_best_subgroups_using_superset_algo,\n get_waterfall_ylims,\n waterfall_plot_mpl,\n)\nfrom chaos_genius.core.utils.round import round_df, round_number\n\nSUPPORTED_AGGREGATIONS = [\"mean\", \"sum\", \"count\"]\nEPSILON = 1e-8\n\n\nclass RootCauseAnalysis:\n \"\"\"RCA Processor class which computes the RCA.\"\"\"\n\n def __init__(\n self,\n grp1_df: pd.DataFrame,\n grp2_df: pd.DataFrame,\n dims: List[str],\n metric: str,\n num_dim_combs: List[int] = None,\n agg: str = \"mean\",\n ) -> None:\n \"\"\"Initialize the RCA class.\n\n :param grp1_df: baseline dataframe\n :type grp1_df: pd.DataFrame\n :param grp2_df: rca/focus dataframe\n :type grp2_df: pd.DataFrame\n :param dims: list of dimensions to consider\n :type dims: List[str]\n :param metric: name of metric column\n :type metric: str\n :param num_dim_combs: which number of dimension combinations to\n consider, defaults to None\n :type num_dim_combs: List[int], optional\n :param agg: aggregation to use, defaults to \"mean\"\n :type agg: str, optional\n \"\"\"\n self._grp1_df = grp1_df\n self._grp2_df = grp2_df\n self._preprocess_rca_dfs()\n self._full_df = pd.concat([self._grp1_df, self._grp2_df])\n\n self._check_columns(dims)\n self._dims = dims\n\n self._check_columns(metric)\n self._metric = metric\n self._metric_is_cat = self._full_df[metric].dtype == object\n\n if agg not in SUPPORTED_AGGREGATIONS:\n raise ValueError(f\"Aggregation {agg} is not supported.\")\n self._agg = agg\n\n if num_dim_combs is None or not dims:\n num_dim_combs = list(range(1, len(dims) + 1))\n else:\n if max(num_dim_combs) > len(self._dims) or min(num_dim_combs) < 1:\n raise ValueError(f\"n {num_dim_combs} is out of range.\")\n if len(set(num_dim_combs)) != len(num_dim_combs):\n raise ValueError(f\"n {num_dim_combs} has duplicates.\")\n if len(num_dim_combs) > 4:\n warnings.warn(\n \"Passing more than 4 values for n will take a while.\"\n )\n self._num_dim_combs_to_consider = num_dim_combs\n\n self._impact_table = None\n self._waterfall_table = None\n\n self._max_waterfall_columns = 5\n self._max_subgroups_considered = 100\n\n def _initialize_impact_table(self):\n self._create_binned_columns()\n dim_combs_list = self._generate_all_dim_combinations()\n\n impacts = []\n for dim_comb in dim_combs_list:\n dim_comb_impact = self._compare_subgroups(dim_comb)\n impacts.append(dim_comb_impact)\n impact_table = pd.concat(impacts)\n\n # sort by absolute impact values\n impact_table = impact_table.sort_values(\n by=\"impact\",\n ascending=False,\n key=lambda x: x.abs(),\n ignore_index=True,\n )\n\n # add query string\n impact_table.loc[:, \"string\"] = impact_table[self._dims].apply(\n lambda inp: convert_df_dims_to_query_strings(inp), axis=1\n )\n\n # keeping only relevant features\n # impact_table.drop(self._dims, axis= 1, inplace= True)\n metric_columns = [\n \"impact\",\n \"val_g1\",\n \"val_g2\",\n \"size_g1\",\n \"size_g2\",\n \"count_g1\",\n \"count_g2\",\n ]\n impact_table = impact_table[[\"string\"] + self._dims + 
metric_columns]\n\n return impact_table\n\n def _get_single_dim_impact_table(self, single_dim):\n\n if self._impact_table is None:\n self._impact_table = self._initialize_impact_table()\n\n impact_table = self._impact_table.copy()\n other_dims = set(self._dims)\n other_dims.remove(single_dim)\n impact_table = impact_table[\n (~impact_table[single_dim].isna())\n & (impact_table[other_dims].isna().sum(axis=1) == len(other_dims))\n ]\n\n impact_table = impact_table.reset_index(drop=True)\n\n return impact_table\n\n def _initialize_waterfall_table(self, single_dim=None):\n\n if self._impact_table is None:\n self._impact_table = self._initialize_impact_table()\n\n # get impact values\n if single_dim is not None:\n impact_table = self._get_single_dim_impact_table(single_dim)\n else:\n impact_table = self._impact_table.copy()\n\n # getting subgroups for waterfall\n best_subgroups = get_best_subgroups_using_superset_algo(\n impact_table,\n self._max_waterfall_columns,\n self._max_subgroups_considered,\n )\n best_subgroups = best_subgroups[\n best_subgroups[\"ignored\"] == False # noqa E712\n ]\n best_subgroups = best_subgroups.merge(\n impact_table[[\"string\", \"impact\"]], how=\"inner\", on=\"string\"\n )\n best_subgroups[\"impact_non_overlap\"] = best_subgroups[\"impact\"]\n best_subgroups.rename(\n columns={\"impact\": \"impact_full_group\"}, inplace=True\n )\n best_subgroups[[\"indices_in_group\", \"non_overlap_indices\"]] = 0\n\n # calculate overlap values\n best_subgroups = self._get_overlap_values_for_waterfall(best_subgroups)\n\n return best_subgroups\n\n def _preprocess_rca_dfs(self):\n \"\"\"Preprocess dataframes for RCA Analysis.\"\"\"\n self._grp1_df = self._grp1_df.reset_index(drop=True)\n self._grp2_df = self._grp2_df.reset_index(drop=True)\n self._grp2_df.index = self._grp2_df.index + len(self._grp1_df)\n\n def _check_columns(self, cols):\n if isinstance(cols, str):\n cols = [cols]\n for col in cols:\n if col not in self._full_df.columns:\n raise ValueError(f\"Column {col} not in data.\")\n\n def _create_binned_columns(self):\n non_cat_cols = self._full_df.dtypes[self._dims][\n self._full_df.dtypes[self._dims] != object\n ]\n\n for col in non_cat_cols.index:\n binned_values = pd.qcut(\n self._full_df[col], 4, duplicates=\"drop\"\n ).astype(str)\n self._full_df[col] = binned_values\n\n self._grp1_df = self._full_df.loc[self._grp1_df.index]\n self._grp2_df = self._full_df.loc[self._grp2_df.index]\n\n def _generate_all_dim_combinations(self) -> List[List[str]]:\n \"\"\"Create a dictionary of all possible combinations of dims.\n\n Returns:\n List[List[str]]: Returns a list of all possible subgroups\n \"\"\"\n list_subgroups = []\n for i in self._num_dim_combs_to_consider:\n list_subgroups_of_level = list(\n map(list, combinations(self._dims, i))\n )\n list_subgroups.extend(list_subgroups_of_level)\n return list_subgroups\n\n def _calculate_subgroup_values(self, data, suffix):\n agg_name = self._agg + suffix\n count_name = \"count\" + suffix\n if self._agg == \"mean\":\n value_numerator = data[agg_name] * data[count_name]\n value_denominator = data[count_name].sum() + EPSILON\n value = value_numerator / value_denominator\n elif self._agg in [\"sum\", \"count\"]:\n value = data[agg_name]\n else:\n raise ValueError(f\"Aggregation {self._agg} is not defined.\")\n\n size = data[count_name] * 100 / (data[count_name].sum() + EPSILON)\n\n return value, size\n\n def _compare_subgroups(self, dim_comb: List[str]) -> pd.DataFrame:\n agg_list = [self._agg, \"count\"] if self._agg != \"count\" else 
[\"count\"]\n\n grp1_df = (\n self._grp1_df.groupby(dim_comb)[self._metric]\n .agg(agg_list)\n .reset_index()\n )\n grp2_df = (\n self._grp2_df.groupby(dim_comb)[self._metric]\n .agg(agg_list)\n .reset_index()\n )\n\n combined_df = grp1_df.merge(\n grp2_df, how=\"outer\", on=dim_comb, suffixes=[\"_g1\", \"_g2\"]\n ).fillna(0)\n\n for i, suffix in enumerate([\"_g1\", \"_g2\"]):\n agg_name = self._agg + suffix\n count_name = \"count\" + suffix\n\n if self._agg == \"mean\":\n value_numerator = (\n combined_df[agg_name] * combined_df[count_name]\n )\n value_denominator = combined_df[count_name].sum() + EPSILON\n value = value_numerator / value_denominator\n elif self._agg in [\"sum\", \"count\"]:\n value = combined_df[agg_name]\n else:\n raise ValueError(f\"Aggregation {self._agg} is not defined.\")\n\n combined_df[\"val\" + suffix] = value\n combined_df[\"size\" + suffix] = combined_df[count_name] * 100\n if i == 0:\n combined_df[\"size\" + suffix] /= len(self._grp1_df) + EPSILON\n elif i == 1:\n combined_df[\"size\" + suffix] /= len(self._grp2_df) + EPSILON\n\n (\n combined_df[\"val_g1\"],\n combined_df[\"size_g1\"],\n ) = self._calculate_subgroup_values(combined_df, \"_g1\")\n (\n combined_df[\"val_g2\"],\n combined_df[\"size_g2\"],\n ) = self._calculate_subgroup_values(combined_df, \"_g2\")\n\n combined_df[\"impact\"] = combined_df[\"val_g2\"] - combined_df[\"val_g1\"]\n\n return combined_df\n\n def _get_overlap_values_for_waterfall(\n self,\n subgroups_df: pd.DataFrame,\n ):\n subgroups_df_output = subgroups_df.copy()\n len_d1 = self._grp1_df[self._metric].count()\n len_d2 = self._grp2_df[self._metric].count()\n\n for subgroup in subgroups_df_output[\"string\"]:\n all_indices = set()\n\n # others are all subgroups minus the current subgroup\n other_subgroups = subgroups_df_output[\"string\"].values.tolist()\n other_subgroups.remove(subgroup)\n other_combinations = {\n i: combinations(other_subgroups, i)\n for i in range(1, len(subgroups_df_output))\n }\n\n d1_idxs = set(self._grp1_df.query(subgroup).index)\n d2_idxs = set(self._grp2_df.query(subgroup).index)\n\n overlap_indices_count = 0\n curr_loc = 0\n\n for i in range(1, len(subgroups_df_output)):\n for combo in other_combinations[i]:\n query = \" and \".join(combo)\n d1_combo = set(self._grp1_df.query(query).index)\n d2_combo = set(self._grp2_df.query(query).index)\n overlap_points_d1 = (\n d1_idxs.intersection(d1_combo) - all_indices\n )\n overlap_points_d2 = (\n d2_idxs.intersection(d2_combo) - all_indices\n )\n\n overlap_indices_count += len(overlap_points_d1) + len(\n overlap_points_d2\n )\n\n t_d1 = self._grp1_df.loc[overlap_points_d1]\n t_d2 = self._grp2_df.loc[overlap_points_d2]\n if self._agg == \"mean\":\n grp1_val = (\n t_d1[self._metric].mean()\n * t_d1[self._metric].count()\n / len_d1\n )\n grp2_val = (\n t_d2[self._metric].mean()\n * t_d2[self._metric].count()\n / len_d2\n )\n elif self._agg == \"sum\":\n grp1_val = t_d1[self._metric].sum()\n grp2_val = t_d2[self._metric].sum()\n elif self._agg == \"count\":\n grp1_val = t_d1[self._metric].count()\n grp2_val = t_d2[self._metric].count()\n\n overlap_impact = grp2_val - grp1_val\n if np.isnan(overlap_impact):\n overlap_impact = 0\n curr_loc = subgroups_df_output[\n subgroups_df_output[\"string\"] == subgroup\n ].index[0]\n\n subgroups_df_output.loc[\n curr_loc, \"impact_non_overlap\"\n ] = subgroups_df_output.loc[\n curr_loc, \"impact_non_overlap\"\n ] - (\n overlap_impact * len(combo) / (len(combo) + 1)\n )\n\n all_indices = all_indices.union(overlap_points_d1).union(\n 
overlap_points_d2\n )\n\n subgroups_df_output.loc[curr_loc, \"indices_in_group\"] = len(\n d1_idxs\n ) + len(d2_idxs)\n\n subgroups_df_output.loc[curr_loc, \"non_overlap_indices\"] = (\n len(d1_idxs) + len(d2_idxs) - overlap_indices_count\n )\n\n return subgroups_df_output\n\n def _get_waterfall_output_data(\n self,\n df_subgroups: pd.DataFrame,\n word_wrap_num: int,\n plot_in_mpl: bool,\n ) -> Tuple[Tuple[float, float], pd.DataFrame]:\n\n d1_agg = self._grp1_df[self._metric].agg(self._agg)\n d2_agg = self._grp2_df[self._metric].agg(self._agg)\n d1_agg = 0 if pd.isna(d1_agg) else d1_agg\n d2_agg = 0 if pd.isna(d2_agg) else d2_agg\n impact = d2_agg - d1_agg\n non_overlap_impact = df_subgroups[\"impact_non_overlap\"].sum()\n\n waterfall_df = df_subgroups[[\"string\", \"impact_non_overlap\"]].copy()\n others_impact = impact - non_overlap_impact\n # only if impact of others is not close to 0, we add it\n if not isclose(others_impact, 0, rel_tol=0.0001, abs_tol=EPSILON):\n waterfall_df = waterfall_df.append(\n {\"string\": \"others\", \"impact_non_overlap\": others_impact},\n ignore_index=True,\n )\n\n col_names_for_mpl = [\n \"start\",\n *[\n \"\\n\".join(wrap(i, word_wrap_num))\n for i in waterfall_df[\"string\"].values.tolist()\n ],\n ]\n col_values = [\n d1_agg,\n *waterfall_df[\"impact_non_overlap\"].values.tolist(),\n ]\n col_names_for_mpl.append(\"end\")\n col_values.append(d2_agg)\n\n y_axis_lims = get_waterfall_ylims(\n pd.DataFrame(\n data={self._metric: col_values}, index=col_names_for_mpl\n ),\n self._metric,\n )\n\n if plot_in_mpl:\n print(\"plot\")\n waterfall_plot_mpl(\n pd.DataFrame(\n data={self._metric: col_values}, index=col_names_for_mpl\n ),\n self._metric,\n y_axis_lims,\n )\n plt.show()\n\n # Calculate steps for each subgroup\n col_values = (\n col_values[:1]\n + [sum(col_values[: i + 1]) for i in range(1, len(col_values) - 1)]\n + col_values[-1:]\n )\n\n js_df = pd.DataFrame(\n data={\n \"value\": col_values,\n \"category\": [\"start\"]\n + waterfall_df[\"string\"].values.tolist()\n + [\"end\"],\n \"stepValue\": col_values,\n }\n )\n\n js_df[\"open\"] = js_df[\"value\"].shift(1, fill_value=0)\n\n js_df[\"color\"] = [\n \"#FA5252\" if val <= 0 else \"#05A677\"\n for val in [0]\n + waterfall_df[\"impact_non_overlap\"].values.tolist()\n + [0]\n ]\n\n js_df.loc[[0, len(js_df) - 1], [\"open\", \"color\"]] = [\n [0, \"#778CA3\"],\n [0, \"#778CA3\"],\n ]\n\n js_df[\"displayValue\"] = js_df[\"value\"] - js_df[\"open\"]\n\n return y_axis_lims, js_df\n\n def _get_best_subgroups_waterfall(\n self,\n single_dim,\n max_waterfall_columns,\n max_subgroups_considered,\n ):\n recalc = False\n if (\n max_waterfall_columns is not None\n and max_waterfall_columns != self._max_waterfall_columns\n ):\n\n recalc = True\n self._max_waterfall_columns = max_waterfall_columns\n\n if (\n max_subgroups_considered is not None\n and max_subgroups_considered != self._max_subgroups_considered\n ):\n\n recalc = True\n self._max_subgroups_considered = max_subgroups_considered\n\n if single_dim is None:\n if self._waterfall_table is None or recalc:\n self._waterfall_table = self._initialize_waterfall_table(\n single_dim\n )\n best_subgroups = self._waterfall_table.copy()\n else:\n best_subgroups = self._initialize_waterfall_table(single_dim)\n\n best_subgroups.drop(\"ignored\", axis=1, inplace=True)\n\n return best_subgroups\n\n def get_panel_metrics(self) -> Dict[str, float]:\n \"\"\"Return panel metrics for the KPI.\n\n :return: Dictionary with metrics\n :rtype: Dict[str, float]\n \"\"\"\n g1 = 
self._grp1_df[self._metric]\n g2 = self._grp2_df[self._metric]\n # set aggregations to 0 if data is empty\n g1_agg = g1.agg(self._agg) if len(g1) > 0 else 0\n g2_agg = g2.agg(self._agg) if len(g2) > 0 else 0\n impact = g2_agg - g1_agg\n perc_diff = (impact / g1_agg) * 100 if g1_agg != 0 else np.inf\n\n panel_metrics = {\n \"group1_value\": round_number(g1_agg),\n \"group2_value\": round_number(g2_agg),\n \"difference\": round_number(impact),\n \"perc_change\": round_number(perc_diff)\n if not np.isinf(perc_diff)\n else \"inf\",\n }\n\n # Check for None or NaN values in output\n for k, v in panel_metrics.items():\n if v is None or pd.isna(v):\n raise ValueError(f\"{k} with value: {v} is either None or NaN\")\n\n return panel_metrics\n\n def get_impact_rows(\n self, single_dim: str = None\n ) -> List[Dict[str, object]]:\n \"\"\"Return impact dataframe as a list.\n\n :param single_dim: dimension to use, defaults to None\n :type single_dim: str, optional\n :return: list with rows of impact table\n :rtype: List[Dict[str, object]]\n \"\"\"\n if self._impact_table is None:\n self._impact_table = self._initialize_impact_table()\n\n impact_table = self._impact_table.copy()\n\n if single_dim is not None:\n impact_table = impact_table[~impact_table[single_dim].isna()]\n impact_table = impact_table.reset_index(drop=True)\n\n impact_table.drop(self._dims, axis=1, inplace=True)\n\n impact_table[\"string\"] = impact_table[\"string\"].apply(\n convert_query_string_to_user_string\n )\n\n # Check for any nan values in impact values and raise ValueError if found\n self._check_nan(\n impact_table, f\"Impact table for dimension {single_dim}\"\n )\n\n return round_df(impact_table).to_dict(\"records\")\n\n def get_impact_column_map(\n self, timeline: str = \"last_30_days\"\n ) -> List[Dict[str, str]]:\n \"\"\"Return a mapping of column names to values for UI.\n\n :param timeline: timeline to use, defaults to \"last_30_days\"\n :type timeline: str, optional\n :return: List of mappings\n :rtype: List[Dict[str, str]]\n \"\"\"\n prev_timestr = TIME_RANGES_BY_KEY[timeline][\"last_period_name\"]\n curr_timestr = TIME_RANGES_BY_KEY[timeline][\"current_period_name\"]\n\n mapping = [\n (\"subgroup\", \"Subgroup Name\"),\n (\"g1_agg\", f\"{prev_timestr} Value\"),\n (\"g1_count\", f\"{prev_timestr} Count (#)\"),\n (\"g1_size\", f\"{prev_timestr} Size (%)\"),\n (\"g2_agg\", f\"{curr_timestr} Value\"),\n (\"g2_count\", f\"{curr_timestr} Count (#)\"),\n (\"g2_size\", f\"{curr_timestr} Size (%)\"),\n (\"impact\", \"Impact\"),\n ]\n\n mapping = [{\"title\": v, \"field\": k} for k, v in mapping]\n\n return mapping\n\n def get_waterfall_table_rows(\n self,\n single_dim: str = None,\n max_waterfall_columns: int = None, # defaults to 5 or last value\n max_subgroups_considered: int = None, # defaults to 100 or last value\n ) -> List[Dict]:\n \"\"\"Return rows for the waterfall table.\n\n :param single_dim: dimension to use, defaults to None\n :type single_dim: str, optional\n :param max_waterfall_columns: max columns in waterfall, defaults to\n None\n :type max_waterfall_columns: int, optional\n :return: list of all rows in table\n :rtype: List[Dict]\n \"\"\"\n best_subgroups = self._get_best_subgroups_waterfall(\n single_dim, max_waterfall_columns, max_subgroups_considered\n )\n\n best_subgroups[\"string\"] = best_subgroups[\"string\"].apply(\n convert_query_string_to_user_string\n )\n\n # Check for any nan values in best subgroups and raise ValueError if found\n self._check_nan(\n best_subgroups, f\"Waterfall table for dimension 
{single_dim}\"\n )\n\n return round_df(best_subgroups).to_dict(\"records\")\n\n def get_waterfall_plot_data(\n self,\n single_dim: str = None,\n plot_in_mpl: bool = False,\n word_wrap_num: int = 15,\n max_waterfall_columns: int = None, # defaults to 5 or last value\n max_subgroups_considered: int = None, # defaults to 100 or last value\n ) -> Tuple[List[Dict], List[float]]:\n \"\"\"Return plot data for waterfall chart.\n\n :param single_dim: dimension to use, defaults to None\n :type single_dim: str, optional\n :param plot_in_mpl: flag to plot in matplotlib, defaults to False\n :type plot_in_mpl: bool, optional\n :param word_wrap_num: wordwrapping for columns, defaults to 15\n :type word_wrap_num: int, optional\n :param max_waterfall_columns: max columns in waterfall, defaults to\n None\n :type max_waterfall_columns: int, optional\n :return: plot data for waterfall chart\n :rtype: Tuple[List[Dict], List[float, float]]\n \"\"\"\n best_subgroups = self._get_best_subgroups_waterfall(\n single_dim, max_waterfall_columns, max_subgroups_considered\n )\n\n # get waterfall chart data\n y_axis_lims, waterfall_df = self._get_waterfall_output_data(\n best_subgroups, word_wrap_num, plot_in_mpl\n )\n\n # convert query strings to user strings\n waterfall_df[\"category\"] = waterfall_df[\"category\"].apply(\n convert_query_string_to_user_string\n )\n\n # Check for any nan values in waterfall df and raise ValueError if found\n self._check_nan(\n waterfall_df, f\"Waterfall chart for dimension {single_dim}\"\n )\n\n return (\n round_df(waterfall_df).to_dict(\"records\"),\n [round_number(i) for i in y_axis_lims],\n )\n\n def get_hierarchical_table(\n self,\n single_dim: str,\n max_depth: int = 3,\n max_children: int = 5,\n max_parents: int = 5,\n ) -> List[Dict]:\n \"\"\"Return rows for hierarchical table.\n\n :param single_dim: dimension to use\n :type single_dim: str\n :param max_depth: maximum depth for the hierarchy, defaults to 3\n :type max_depth: int, optional\n :param max_children: max children per row, defaults to 5\n :type max_children: int, optional\n :param max_parents: max first level rows, defaults to 5\n :type max_parents: int, optional\n :return: list of rows for the table\n :rtype: List[Dict]\n \"\"\"\n other_dims = self._dims[:]\n other_dims.remove(single_dim)\n\n impact_table = self._initialize_impact_table()\n impact_table[\"parentId\"] = None\n # impact_table[\"id\"] = impact_table.index\n impact_table[\"depth\"] = None\n\n output_table = self._get_single_dim_impact_table(single_dim)\n\n output_table = output_table.iloc[:max_parents]\n\n output_table[\"depth\"] = 1\n\n for depth in range(1, max_depth):\n parents = output_table[output_table[\"depth\"] == depth]\n for index, row in parents.iterrows():\n string = row[\"string\"]\n filters = string.split(\" and \")\n children = impact_table\n for filter_string in filters:\n children = children[\n children[\"string\"].str.contains(\n filter_string, regex=False\n )\n ]\n children = children[\n children[other_dims].isna().sum(axis=1)\n == len(other_dims) - depth\n ]\n children = children.iloc[:max_children]\n children[\"depth\"] = depth + 1\n children[\"parentId\"] = index\n output_table = output_table.append(children, ignore_index=True)\n\n output_table.drop(self._dims, axis=1, inplace=True)\n\n output_table = output_table.reset_index().rename(\n columns={\"index\": \"id\"}\n )\n\n output_table[\"string\"] = output_table[\"string\"].apply(\n convert_query_string_to_user_string\n )\n\n # Check for any nan values in output table and raise 
ValueError if found\n self._check_nan(\n output_table.drop(\"parentId\", axis=1),\n f\"Hierarchical table for dimension {single_dim}\",\n )\n\n return round_df(output_table).to_dict(\"records\")\n\n def _check_nan(self, df: pd.DataFrame, message: str) -> None:\n \"\"\"Check if NaN values in dataframe.\"\"\"\n nan_df = df.isna().sum()\n nan_dict: dict = nan_df[nan_df > 0].to_dict()\n\n if nan_dict:\n raise ValueError(f\"{message} contains NaN values. {nan_dict}\")\n",
"import logging\nimport os\nimport io\nfrom typing import Optional, List, Tuple, Union\nimport pandas as pd\nimport time\nimport datetime\nfrom datetime import date\nfrom copy import deepcopy\nfrom chaos_genius.utils.io_helper import is_file_exists\nfrom chaos_genius.core.utils.round import round_number\nfrom chaos_genius.databases.models.data_source_model import DataSource\nfrom chaos_genius.databases.models.alert_model import Alert\nfrom chaos_genius.databases.models.anomaly_data_model import AnomalyDataOutput\nfrom chaos_genius.databases.models.triggered_alerts_model import TriggeredAlerts\nfrom chaos_genius.databases.models.kpi_model import Kpi\n\n# from chaos_genius.connectors.base_connector import get_df_from_db_uri\nfrom chaos_genius.connectors import get_sqla_db_conn\nfrom chaos_genius.alerts.email import send_static_alert_email\nfrom chaos_genius.alerts.slack import anomaly_alert_slack, event_alert_slack\nfrom chaos_genius.alerts.utils import count_anomalies, top_anomalies\nfrom chaos_genius.alerts.anomaly_alert_config import (\n ANOMALY_TABLE_COLUMN_NAMES_MAPPER,\n IGNORE_COLUMNS_ANOMALY_TABLE,\n ANOMALY_ALERT_COLUMN_NAMES,\n ANOMALY_TABLE_COLUMNS_HOLDING_FLOATS\n)\nfrom chaos_genius.alerts.constants import ALERT_DATE_FORMAT\nfrom chaos_genius.alerts.utils import count_anomalies, save_anomaly_point_formatting, top_anomalies, webapp_url_prefix\nfrom chaos_genius.core.rca.rca_utils.string_helpers import convert_query_string_to_user_string\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom tabulate import tabulate\n\nlogger = logging.getLogger()\n\nFREQUENCY_DICT = {\n \"weekly\": datetime.timedelta(days=7, hours=0, minutes=0),\n \"daily\": datetime.timedelta(days=1, hours=0, minutes=0),\n \"hourly\": datetime.timedelta(days=0, hours=1, minutes=0),\n \"every_15_minute\": datetime.timedelta(days=0, hours=0, minutes=15),\n \"every_minute\": datetime.timedelta(days=0, hours=0, minutes=1),\n}\n\n\nclass StaticEventAlertController:\n \"\"\"This is the controller class for the static events\n\n Raises:\n Exception: Raise if the alert if not found\n Exception: Raise if alert settings isn't configured properly\n\n Returns:\n object: object of the class\n \"\"\"\n\n PICKLE_DIR = \".alert\"\n\n def __init__(self, alert_info: dict, data_source_info: dict):\n \"\"\"Initiate the static event controller class\n\n Args:\n alert_info (dict): alert information\n data_source_info (dict): data_source_info for the corresponding data connection\n\n Raises:\n Exception: Raise if Alert id not found\n \"\"\"\n self.alert_info = alert_info\n self.data_source_info = data_source_info\n self.alert_id = alert_info[\"id\"]\n if not self.alert_id:\n raise Exception(\"Alert ID is required\")\n self.pickle_file_path = f\"{self.PICKLE_DIR}/{self.alert_id}.pkl\"\n self.load_pickled_df()\n self.load_query_data()\n\n def load_pickled_df(self):\n \"\"\"Load the pickled dataframe of the given alert id\"\"\"\n os.makedirs(f\"./{self.PICKLE_DIR}\", exist_ok=True)\n full_path = is_file_exists(self.pickle_file_path)\n if full_path:\n self.unpickled_df = pd.read_pickle(full_path)\n else:\n self.unpickled_df = pd.DataFrame()\n\n def pickle_df(self):\n \"\"\"Pickle and save the current dataframe state\"\"\"\n pickle_path = f\"./{self.pickle_file_path}\"\n self.query_df.to_pickle(pickle_path)\n\n def load_query_data(self):\n \"\"\"Load the query data from the data source\"\"\"\n db_connection = get_sqla_db_conn(data_source_info=self.data_source_info)\n self.query_df = 
db_connection.run_query(self.alert_info[\"alert_query\"])\n # self.query_df = get_df_from_db_uri(self.data_source_info, self.alert_info['alert_query'])\n\n def check_and_prepare_alert(self):\n \"\"\"Check for the alert and trigger the appropriate trigger\n\n Raises:\n Exception: Raise if the alert settings not found\n \"\"\"\n\n alert: Optional[Alert] = Alert.get_by_id(self.alert_info[\"id\"])\n curr_date_time = datetime.datetime.now()\n alert.update(commit=True, last_alerted=curr_date_time)\n\n change_df = pd.DataFrame()\n if self.alert_info[\"alert_settings\"] == \"new_entry_alert\":\n change_df = self.test_new_entry(self.query_df, self.unpickled_df)\n elif self.alert_info[\"alert_settings\"] == \"change_alert\":\n change_df = self.test_change_entry(self.query_df, self.unpickled_df)\n elif self.alert_info[\"alert_settings\"] == \"always_alert\":\n change_df = self.query_df\n elif self.alert_info[\"alert_settings\"] == \"missing_data_alert\":\n change_df = self.query_df\n else:\n raise Exception(\"Alert Setting isn't configured\")\n\n outcome = False\n alert_data = None\n\n if (\n not change_df.empty\n and self.alert_info[\"alert_settings\"] != \"missing_data_alert\"\n ):\n if self.alert_info[\"alert_channel\"] == \"email\":\n outcome, alert_data = self.prepare_email(change_df)\n elif self.alert_info[\"alert_channel\"] == \"slack\":\n outcome, alert_data = self.send_slack_event_alert(change_df)\n\n # send the missing data alert with different template\n if (\n change_df.empty\n and self.alert_info[\"alert_settings\"] == \"missing_data_alert\"\n ):\n if self.alert_info[\"alert_channel\"] == \"email\":\n outcome, alert_data = self.send_missing_data_email_alert()\n elif self.alert_info[\"alert_channel\"] == \"slack\":\n outcome, alert_data = self.send_slack_event_alert(change_df)\n\n self.pickle_df()\n\n if alert_data is None:\n return outcome\n \n alert_metadata = {\n \"alert_frequency\": self.alert_info[\"alert_frequency\"],\n \"alert_data\": alert_data,\n } \n\n triggered_alert = TriggeredAlerts(\n alert_conf_id=self.alert_info[\"id\"],\n alert_type=\"Event Alert\",\n is_sent=outcome,\n created_at=datetime.datetime.now(),\n alert_metadata=alert_metadata \n )\n\n triggered_alert.update(commit=True)\n return outcome\n\n\n @staticmethod\n def test_new_entry(new_df, old_df):\n \"\"\"Test if some new record is added in the given table/query\n\n Args:\n new_df (DataFrame): Latest query data fetched from the linked database\n old_df (DataFrame): Last state of the table/query stored in the pickled dataframe\n\n Returns:\n DataFrame: Return the dataframe only with the new entry\n \"\"\"\n if old_df.empty:\n return new_df\n change = new_df.merge(old_df, how=\"outer\", indicator=True).loc[\n lambda x: x[\"_merge\"] == \"left_only\"\n ]\n return change.drop(columns=[\"_merge\"])\n\n @staticmethod\n def test_change_entry(new_df, old_df):\n \"\"\"Test if some new record is added or deleted in the given table/query\n\n Args:\n new_df (DataFrame): Latest query data fetched from the linked database\n old_df (DataFrame): Last state of the table/query stored in the pickled dataframe\n\n Returns:\n DataFrame: Return the dataframe only with the changes\n \"\"\"\n if old_df.empty:\n new_df[\"change\"] = \"added\"\n return new_df\n added_rows = new_df.merge(old_df, how=\"outer\", indicator=True).loc[\n lambda x: x[\"_merge\"] == \"left_only\"\n ]\n added_rows[\"change\"] = \"added\"\n deleted_rows = new_df.merge(old_df, how=\"outer\", indicator=True).loc[\n lambda x: x[\"_merge\"] == \"right_only\"\n ]\n 
deleted_rows[\"change\"] = \"deleted\"\n return pd.concat([added_rows, deleted_rows])\n\n def prepare_email(self, change_df):\n \"\"\"Prepare the email subject, body and CSV attachment for trigger\n\n Args:\n change_df (DataFrame): Dataframe with only the rows with change\n \"\"\"\n\n alert_channel_conf = self.alert_info[\"alert_channel_conf\"]\n\n if type(alert_channel_conf) != dict:\n logger.debug(\n f\"The alert channel configuration is incorrect for Alert ID - {self.alert_info['id']}\"\n )\n return False\n\n recipient_emails = alert_channel_conf.get(\"email\", [])\n\n if recipient_emails:\n subject = f\"{self.alert_info['alert_name']} - Chaos Genius Event Alert❗\"\n message = self.alert_info[\"alert_message\"]\n files = []\n if not change_df.empty:\n file_detail = {}\n file_detail[\"fname\"] = \"data.csv\"\n with io.StringIO() as buffer:\n change_df.to_csv(buffer, index=False)\n file_detail[\"fdata\"] = buffer.getvalue()\n files = [file_detail]\n\n column_names = list(change_df.columns)[:4]\n add_df = []\n del_df = []\n normal_df = []\n\n if self.alert_info[\"alert_settings\"] == \"new_entry_alert\":\n normal_df = list(change_df.head().T.to_dict().values())\n elif self.alert_info[\"alert_settings\"] == \"change_alert\":\n del_df = list(\n change_df[change_df[\"change\"] == \"deleted\"]\n .head()\n .T.to_dict()\n .values()\n )\n add_df = list(\n change_df[change_df[\"change\"] == \"added\"]\n .head()\n .T.to_dict()\n .values()\n )\n elif self.alert_info[\"alert_settings\"] == \"always_alert\":\n normal_df = list(change_df.head().T.to_dict().values())\n\n test = self.send_template_email(\n \"email_event_alert.html\",\n recipient_emails,\n subject,\n files,\n add_df=add_df,\n del_df=del_df,\n normal_df=normal_df,\n column_names=column_names,\n alert_message=message,\n alert_frequency=self.alert_info[\"alert_frequency\"].capitalize(),\n alert_name=self.alert_info[\"alert_name\"],\n preview_text=\"Static Event Alert\",\n )\n\n alert_data = list(change_df.T.astype(str).to_dict().values())\n return test, alert_data\n else:\n logger.info(\n f\"No email recipients available for Alert ID - {self.alert_info['id']}\"\n )\n return False, None\n\n def send_template_email(self, template, recipient_emails, subject, files, **kwargs):\n \"\"\"Sends an email using a template.\"\"\"\n\n path = os.path.join(os.path.dirname(__file__), \"email_templates\")\n env = Environment(\n loader=FileSystemLoader(path), autoescape=select_autoescape([\"html\", \"xml\"])\n )\n\n template = env.get_template(template)\n test = send_static_alert_email(\n recipient_emails, subject, template.render(**kwargs), self.alert_info, files\n )\n\n if test == True:\n logger.info(\n f\"The email for Alert ID - {self.alert_info['id']} was successfully sent\"\n )\n else:\n logger.debug(\n f\"The email for Alert ID - {self.alert_info['id']} was not sent\"\n )\n return test\n\n def send_slack_event_alert(self, change_df):\n \"\"\"Sends a slack alert\"\"\"\n\n alert_name = self.alert_info[\"alert_name\"]\n alert_frequency = self.alert_info[\"alert_frequency\"]\n alert_message = self.alert_info[\"alert_message\"]\n alert_overview = \"\"\n\n if self.alert_info[\"alert_settings\"] == \"new_entry_alert\":\n alert_overview = f\"Number of rows added: {change_df.shape[0]}\"\n elif self.alert_info[\"alert_settings\"] == \"change_alert\":\n added_rows = change_df[change_df.change == \"added\"].shape[0]\n deleted_rows = change_df[change_df.change == \"deleted\"].shape[0]\n alert_overview = f\"Number of rows added: {added_rows} and Number of rows 
deleted: {deleted_rows}\"\n elif self.alert_info[\"alert_settings\"] == \"always_alert\":\n alert_overview = f\"Number of rows present: {change_df.shape[0]}\"\n\n test = event_alert_slack(\n alert_name, alert_frequency, alert_message, alert_overview\n )\n\n if test == \"ok\":\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} was successfully sent\"\n )\n else:\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} has not been sent\"\n )\n\n message = f\"Status for KPI ID - {self.alert_info['kpi']}: {test}\"\n logger.info(\n message\n )\n\n test = test == \"ok\"\n alert_data = list(change_df.T.astype(str).to_dict().values())\n return test, alert_data\n\n def send_missing_data_email_alert(self):\n alert_channel_conf = self.alert_info[\"alert_channel_conf\"]\n recipient_emails = alert_channel_conf.get(\"email\", [])\n subject = f\"{self.alert_info['alert_name']} - Chaos Genius Event Alert❗\"\n if not recipient_emails:\n return False, None\n test = self.send_template_email(\n template=\"missing_data_alert.html\",\n recipient_emails=recipient_emails,\n subject=subject,\n files=[],\n alert_message=self.alert_info.get(\"alert_message\", \"\"),\n alert_frequency=self.alert_info.get(\"alert_frequency\", \"\").capitalize(),\n alert_name=self.alert_info.get(\"alert_name\", \"\"),\n preview_text=\"Missing Data Alert\",\n )\n\n return test, []\n\n def send_missing_data_slack_alert(self):\n test = event_alert_slack(\n alert_name=self.alert_info[\"alert_name\"],\n alert_frequency=self.alert_info[\"alert_frequency\"],\n alert_message=self.alert_info[\"alert_message\"],\n alert_overview=''\n )\n\n if test == \"ok\":\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} was successfully sent\"\n )\n else:\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} has not been sent\"\n )\n\n message = f\"Status for KPI ID - {self.alert_info['kpi']}: {test}\"\n logger.info(\n message\n )\n\n test = test == \"ok\"\n return test, []\n\nclass AnomalyAlertController:\n def __init__(self, alert_info, anomaly_end_date=None):\n self.alert_info = alert_info\n self.now = datetime.datetime.now()\n if anomaly_end_date:\n self.anomaly_end_date = anomaly_end_date\n else:\n self.anomaly_end_date = self.now - datetime.timedelta(days=3)\n\n def check_and_prepare_alert(self):\n kpi_id = self.alert_info[\"kpi\"]\n alert_id = self.alert_info[\"id\"]\n alert: Optional[Alert] = Alert.get_by_id(self.alert_info[\"id\"])\n if alert is None:\n logger.info(f\"Could not find alert by ID: {self.alert_info['id']}\")\n return False\n\n check_time = FREQUENCY_DICT[self.alert_info[\"alert_frequency\"]]\n fuzzy_interval = datetime.timedelta(\n minutes=30\n ) # this represents the upper bound of the time interval that an alert can fall short of the check_time hours before which it can be sent again\n if (\n alert.last_alerted is not None\n and alert.last_alerted > (self.now - check_time)\n and alert.last_alerted > ((self.now + fuzzy_interval) - check_time)\n ):\n # this check works in three steps\n # 1) Verify if the last alerted value of an alert is not None\n # 2) Verify if less than check_time hours have elapsed since the last alert was sent\n # 3) If less than check_time hours have elapsed, check if the additonal time to complete check_time hours is greater than fuzzy_interval\n logger.info(\n f\"Skipping alert with ID {self.alert_info['id']} since it was already run\"\n )\n return True\n alert.update(commit=True, last_alerted=self.now)\n\n # TODO: Add the 
series type filter for query optimisation\n anomaly_data = AnomalyDataOutput.query.filter(\n AnomalyDataOutput.kpi_id == kpi_id,\n AnomalyDataOutput.anomaly_type.in_([\"overall\", \"subdim\"]),\n AnomalyDataOutput.is_anomaly.in_([1,-1]),\n AnomalyDataOutput.data_datetime >= self.anomaly_end_date,\n AnomalyDataOutput.severity >= self.alert_info[\"severity_cutoff_score\"]\n ).all()\n\n if len(anomaly_data) == 0:\n logger.info(f\"No anomaly exists (Alert ID - {alert_id})\")\n return True\n\n logger.info(f\"Alert ID {alert_id} is sent to the respective alert channel\")\n\n if self.alert_info[\"alert_channel\"] == \"email\":\n outcome, alert_data = self.send_alert_email(anomaly_data)\n elif self.alert_info[\"alert_channel\"] == \"slack\":\n outcome, alert_data = self.send_slack_alert(anomaly_data)\n \n if alert_data is None:\n return outcome\n \n alert_metadata = {\n \"alert_frequency\": self.alert_info[\"alert_frequency\"],\n \"alert_data\": alert_data,\n \"end_date\": str(self.anomaly_end_date),\n \"severity_cutoff_score\": self.alert_info[\"severity_cutoff_score\"],\n \"kpi\": self.alert_info[\"kpi\"]\n } \n\n triggered_alert = TriggeredAlerts(\n alert_conf_id=self.alert_info[\"id\"],\n alert_type=\"KPI Alert\",\n is_sent=outcome,\n created_at=datetime.datetime.now(),\n alert_metadata=alert_metadata \n )\n\n triggered_alert.update(commit=True)\n logger.info(\n f\"The triggered alert data was successfully stored\"\n )\n return outcome\n\n def get_overall_subdim_data(self, anomaly_data):\n\n anomaly_data = [anomaly_point.as_dict for anomaly_point in anomaly_data]\n anomaly_data = [{key: value for key, value in anomaly_point.items() if key not in IGNORE_COLUMNS_ANOMALY_TABLE} for anomaly_point in anomaly_data]\n\n for anomaly_point in anomaly_data:\n anomaly_point[\"series_type\"] = \"Overall KPI\" if anomaly_point.get(\"anomaly_type\") == \"overall\" else anomaly_point[\"series_type\"]\n for key, value in anomaly_point.items():\n if key in ANOMALY_TABLE_COLUMNS_HOLDING_FLOATS:\n anomaly_point[key] = round(value, 2)\n if anomaly_point[\"series_type\"] != \"Overall KPI\":\n anomaly_point[\"series_type\"] = convert_query_string_to_user_string(anomaly_point[\"series_type\"])\n\n overall_data = [anomaly_point for anomaly_point in anomaly_data if anomaly_point.get(\"anomaly_type\") == \"overall\"]\n subdim_data = [anomaly_point for anomaly_point in anomaly_data if anomaly_point.get(\"anomaly_type\") == \"subdim\"]\n overall_data.sort(key=lambda anomaly: anomaly.get(\"severity\"), reverse=True)\n subdim_data.sort(key=lambda anomaly: anomaly.get(\"severity\"), reverse=True)\n\n return overall_data, subdim_data\n\n def _find_point(self, point, prev_data):\n \"\"\"Finds same type of point in previous data.\"\"\"\n intended_point = None\n for prev_point in prev_data:\n if prev_point.get(\"series_type\") == point.get(\"series_type\"):\n intended_point = prev_point\n break\n return intended_point\n\n def _save_nl_message_daily_freq(self, anomaly_data: List[dict], kpi: Kpi):\n \"\"\"Saves change message for every point, for a daily frequency KPI.\"\"\"\n time_diff = datetime.timedelta(days=1, hours=0, minutes=0)\n\n # TODO: fix circular import\n from chaos_genius.controllers.digest_controller import get_previous_data\n prev_day_data = get_previous_data(kpi.id, self.anomaly_end_date, time_diff)\n\n prev_day_data = [anomaly_point.as_dict for anomaly_point in prev_day_data]\n\n for point in prev_day_data:\n if point.get(\"anomaly_type\") != \"overall\":\n point[\"series_type\"] = 
convert_query_string_to_user_string(point[\"series_type\"])\n else:\n point[\"series_type\"] = \"Overall KPI\"\n\n for point in anomaly_data:\n intended_point = self._find_point(point, prev_day_data)\n\n if intended_point is None:\n # previous point wasn't found\n point[\"percentage_change\"] = \"–\"\n elif intended_point[\"y\"] == point[\"y\"]:\n # previous data was same as current\n point[\"percentage_change\"] = 0\n elif intended_point[\"y\"] == 0:\n # previous point was 0\n point[\"percentage_change\"] = \"–\"\n else:\n point[\"percentage_change\"] = find_percentage_change(point[\"y\"], intended_point[\"y\"])\n\n point[\"nl_message\"] = change_message_from_percent(point[\"percentage_change\"])\n\n def _save_nl_message_hourly_freq(self, anomaly_data: List[dict], kpi: Kpi):\n \"\"\"Saves change message for every point, for a hourly frequency KPI.\"\"\"\n data = dict()\n time_diff = datetime.timedelta(days=1, hours=0, minutes=0)\n\n # TODO: fix circular import\n from chaos_genius.controllers.digest_controller import get_previous_data\n prev_day_data = get_previous_data(kpi.id, self.anomaly_end_date, time_diff)\n prev_day_data = [anomaly_point.as_dict for anomaly_point in prev_day_data]\n\n for point in prev_day_data:\n if point.get(\"anomaly_type\") != \"overall\":\n point[\"series_type\"] = convert_query_string_to_user_string(point[\"series_type\"])\n else:\n point[\"series_type\"] = \"Overall KPI\"\n \n for point in prev_day_data:\n if point[\"data_datetime\"].hour not in data.keys():\n data[point[\"data_datetime\"].hour] = []\n data[point[\"data_datetime\"].hour].append(point)\n\n for point in anomaly_data:\n hour_val = point[\"data_datetime\"].hour\n intended_point = self._find_point(point, data.get(hour_val, []))\n if intended_point is None:\n # previous point wasn't found\n point[\"percentage_change\"] = \"–\"\n elif intended_point[\"y\"] == point[\"y\"]:\n # previous data was same as current\n point[\"percentage_change\"] = 0\n elif intended_point[\"y\"] == 0:\n # previous point was 0\n point[\"percentage_change\"] = \"–\"\n else:\n point[\"percentage_change\"] = find_percentage_change(point[\"y\"], intended_point[\"y\"])\n\n point[\"nl_message\"] = change_message_from_percent(point[\"percentage_change\"])\n\n def save_nl_message(self, anomaly_data: List[dict]):\n \"\"\"Constructs and saves change message for every point.\"\"\"\n kpi_id = self.alert_info[\"kpi\"]\n kpi = Kpi.get_by_id(kpi_id)\n if kpi is None:\n for point in anomaly_data:\n point[\"nl_message\"] = \"KPI does not exist\"\n return\n\n time_series_freq = kpi.anomaly_params.get(\"frequency\")\n if time_series_freq is None:\n for point in anomaly_data:\n point[\"nl_message\"] = \"Time series frequency does not exist\"\n return\n\n if time_series_freq in (\"d\", \"D\", \"daily\", \"Daily\"):\n self._save_nl_message_daily_freq(anomaly_data, kpi)\n elif time_series_freq in (\"h\", \"H\", \"hourly\", \"Hourly\"):\n self._save_nl_message_hourly_freq(anomaly_data, kpi)\n else:\n for point in anomaly_data:\n point[\"nl_message\"] = \"Unsupported time series frequency\"\n\n def format_alert_data(self, data: List[dict]):\n \"\"\"Pre-processes anomaly alert data.\"\"\"\n self.save_nl_message(data)\n\n for anomaly_point in data:\n lower = anomaly_point.get(\"yhat_lower\")\n upper = anomaly_point.get(\"yhat_upper\")\n anomaly_point[\"Expected Value\"] = f\"{lower} - {upper}\"\n\n # round off severity for better representation\n anomaly_point[\"severity\"] = round(anomaly_point[\"severity\"])\n\n # rename column names for human 
readability\n for key, value in ANOMALY_TABLE_COLUMN_NAMES_MAPPER.items():\n anomaly_point[value] = anomaly_point[key]\n\n my_time = time.strptime(str(anomaly_point[\"Time of Occurrence\"]), \"%Y-%m-%d %H:%M:%S\")\n timestamp = time.mktime(my_time)\n date_time = datetime.datetime.fromtimestamp(timestamp)\n new_time = date_time.strftime(\"%b %d %Y %H:%M:%S\")\n anomaly_point[\"Time of Occurrence\"] = new_time\n anomaly_point[\"data_datetime\"] = str(anomaly_point[\"data_datetime\"])\n\n def _remove_attributes_from_anomaly_points(self, anomaly_data: List[dict], list_attributes: List[str]):\n for attr in list_attributes:\n for point in anomaly_data:\n delattr(point, attr)\n\n def send_alert_email(self, anomaly_data):\n\n alert_channel_conf = self.alert_info[\"alert_channel_conf\"]\n\n if type(alert_channel_conf) != dict:\n logger.info(\n f\"The alert channel configuration is incorrect for Alert ID - {self.alert_info['id']}\"\n )\n return False\n\n recipient_emails = alert_channel_conf.get(\"email\", [])\n\n if recipient_emails:\n subject = f\"{self.alert_info['alert_name']} - Chaos Genius Alert ({self.now.strftime('%b %d')})❗\"\n alert_message = self.alert_info[\"alert_message\"]\n\n kpi_id = self.alert_info[\"kpi\"]\n kpi_obj = Kpi.query.filter(Kpi.active == True, Kpi.id == kpi_id).first()\n\n if kpi_obj is None:\n logger.error(f\"No KPI exists for Alert ID - {self.alert_info['id']}\")\n return False\n\n kpi_name = getattr(kpi_obj, 'name')\n\n overall_data, subdim_data = self.get_overall_subdim_data(anomaly_data)\n\n overall_data_email_body = deepcopy([overall_data[0]]) if len(overall_data) > 0 else []\n len_subdim = min(10, len(subdim_data))\n subdim_data_email_body = deepcopy(subdim_data[0:len_subdim]) if len(subdim_data) > 0 else []\n\n overall_data.extend(subdim_data)\n overall_data_email_body.extend(subdim_data_email_body)\n\n self.format_alert_data(overall_data)\n self.format_alert_data(overall_data_email_body)\n\n column_names = ANOMALY_ALERT_COLUMN_NAMES\n overall_data_ = pd.DataFrame(overall_data, columns=column_names)\n files = []\n if not overall_data_.empty:\n file_detail = {}\n file_detail[\"fname\"] = \"data.csv\"\n with io.StringIO() as buffer:\n overall_data_.to_csv(buffer, encoding=\"utf-8\")\n file_detail[\"fdata\"] = buffer.getvalue()\n files = [file_detail]\n\n daily_digest = self.alert_info.get(\"daily_digest\", False)\n weekly_digest = self.alert_info.get(\"weekly_digest\", False)\n\n if not (daily_digest or weekly_digest):\n points = deepcopy(\n [anomaly_point.as_dict for anomaly_point in anomaly_data]\n )\n _format_anomaly_points(points)\n self.format_alert_data(points)\n save_anomaly_point_formatting(points)\n top_anomalies_ = top_anomalies(points, 5)\n overall_count, subdim_count = count_anomalies(points)\n\n test = self.send_template_email(\n \"email_alert.html\",\n recipient_emails,\n subject,\n files,\n column_names=column_names,\n top_anomalies=top_anomalies_,\n alert_message=alert_message,\n kpi_name=kpi_name,\n alert_frequency=self.alert_info['alert_frequency'].capitalize(),\n preview_text=\"Anomaly Alert\",\n alert_name=self.alert_info.get(\"alert_name\"),\n kpi_link=f\"{webapp_url_prefix()}#/dashboard/0/anomaly/{kpi_id}\",\n alert_dashboard_link=f\"{webapp_url_prefix()}api/digest\",\n overall_count=overall_count,\n subdim_count=subdim_count\n )\n logger.info(f\"Status for Alert ID - {self.alert_info['id']} : {test}\")\n #self.remove_attributes_from_anomaly_data(overall_data, [\"nl_message\"])\n # TODO: fix this circular import\n from 
chaos_genius.controllers.digest_controller import structure_anomaly_data_for_digests\n anomaly_data = structure_anomaly_data_for_digests(overall_data)\n return False, anomaly_data\n else:\n logger.info(\n f\"No receipent email available (Alert ID - {self.alert_info['id']})\"\n )\n return False, None\n\n def send_template_email(self, template, recipient_emails, subject, files, **kwargs):\n \"\"\"Sends an email using a template.\"\"\"\n path = os.path.join(os.path.dirname(__file__), \"email_templates\")\n env = Environment(\n loader=FileSystemLoader(path), autoescape=select_autoescape([\"html\", \"xml\"])\n )\n\n template = env.get_template(template)\n test = send_static_alert_email(recipient_emails, subject, template.render(**kwargs), self.alert_info, files)\n if test == True:\n logger.info(\n f\"The email for Alert ID - {self.alert_info['id']} was successfully sent\"\n )\n else:\n logger.info(\n f\"The email for Alert ID - {self.alert_info['id']} has not been sent\"\n )\n\n return test\n\n def send_slack_alert(self, anomaly_data):\n kpi_id = self.alert_info[\"kpi\"]\n kpi_obj = Kpi.query.filter(Kpi.active == True, Kpi.id == kpi_id).first()\n\n if kpi_obj is None:\n logger.info(f\"No KPI exists for Alert ID - {self.alert_info['id']}\")\n return False, None\n\n data_source_obj = DataSource.query.filter(\n DataSource.id == self.alert_info[\"data_source\"],\n DataSource.active == True\n ).first()\n \n if data_source_obj is None:\n logger.info(f\"The data source provided for Alert ID - {self.alert_info['id']} does not exist\")\n return False, None\n\n kpi_name = getattr(kpi_obj, \"name\")\n data_source_name = getattr(data_source_obj, \"name\")\n alert_name = self.alert_info.get(\"alert_name\")\n alert_message = self.alert_info[\"alert_message\"]\n\n overall_data, subdim_data = self.get_overall_subdim_data(anomaly_data)\n\n overall_data_alert_body = deepcopy([overall_data[0]]) if len(overall_data) > 0 else []\n len_subdim = min(5, len(subdim_data))\n subdim_data_alert_body = deepcopy(subdim_data[0:len_subdim]) if len(subdim_data) > 0 else []\n\n overall_data.extend(subdim_data)\n overall_data_alert_body.extend(subdim_data_alert_body)\n\n self.format_alert_data(overall_data)\n self.format_alert_data(overall_data_alert_body)\n\n daily_digest = self.alert_info.get(\"daily_digest\", False)\n weekly_digest = self.alert_info.get(\"weekly_digest\", False)\n\n test = \"failed\"\n if not (daily_digest or weekly_digest):\n points = deepcopy(\n [anomaly_point.as_dict for anomaly_point in anomaly_data]\n )\n _format_anomaly_points(points)\n self.format_alert_data(points)\n save_anomaly_point_formatting(points)\n top_anomalies_ = top_anomalies(points, 5)\n overall_count, subdim_count = count_anomalies(points)\n\n test = anomaly_alert_slack(\n kpi_name,\n alert_name,\n kpi_id,\n alert_message,\n top_anomalies_,\n overall_count,\n subdim_count\n )\n\n if test == \"ok\":\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} was successfully sent\"\n )\n else:\n logger.info(\n f\"The slack alert for Alert ID - {self.alert_info['id']} has not been sent\"\n )\n\n message = f\"Status for KPI ID - {self.alert_info['kpi']}: {test}\"\n test = test == \"ok\"\n #self.remove_attributes_from_anomaly_data(overall_data, [\"nl_message\"])\n # TODO: fix this circular import\n from chaos_genius.controllers.digest_controller import structure_anomaly_data_for_digests\n anomaly_data = structure_anomaly_data_for_digests(overall_data)\n return test, anomaly_data\n\n\nclass StaticKpiAlertController:\n def 
__init__(self, alert_info):\n self.alert_info = alert_info\n\n def check_and_prepare_alert(self):\n pass\n\n\ndef _format_anomaly_points(points: List[dict]):\n for anomaly_point in points:\n anomaly_point[\"series_type\"] = \"Overall KPI\" if anomaly_point.get(\"anomaly_type\") == \"overall\" else anomaly_point[\"series_type\"]\n for key, value in anomaly_point.items():\n if key in ANOMALY_TABLE_COLUMNS_HOLDING_FLOATS:\n anomaly_point[key] = round(value, 2)\n if anomaly_point[\"series_type\"] != \"Overall KPI\":\n anomaly_point[\"series_type\"] = convert_query_string_to_user_string(anomaly_point[\"series_type\"])\n\n\ndef find_percentage_change(\n curr_val: Union[int, float],\n prev_val: Union[int, float]\n) -> Union[int, float, str]:\n \"\"\"Calculates percentage change between previous and current value.\"\"\"\n if prev_val == 0:\n return \"-\"\n change = curr_val - prev_val\n percentage_change = (change / prev_val) * 100\n return round_number(percentage_change)\n\n\ndef change_message_from_percent(percent_change: Union[str, int, float]) -> str:\n \"\"\"Creates a change message from given percentage change.\n\n percent_change will be:\n - \"-\" in case the last data point was missing\n - 0 (int) in case there was no change\n - positive value (int/float) in case there was an increase\n - negative value (int/float) in case there was a decrease\n \"\"\"\n if isinstance(percent_change, str):\n return \"-\"\n elif percent_change == 0:\n return \"No change (-)\"\n elif percent_change > 0:\n return f\"Increased by ({percent_change}%)\"\n else:\n return f\"Decreased by ({percent_change}%)\"\n\ndef check_and_trigger_alert(alert_id):\n \"\"\"Check the alert and trigger the notification if found\n\n Args:\n alert_id (int): alert id\n\n Raises:\n Exception: Raise if the alert record not found\n\n Returns:\n bool: status of the alert trigger\n \"\"\"\n alert_info = Alert.get_by_id(alert_id)\n if not alert_info:\n raise Exception(\"Alert doesn't exist\")\n\n if not (alert_info.active and alert_info.alert_status):\n print(\"Alert isn't active. 
Please activate the alert.\")\n return True\n\n if alert_info.alert_type == \"Event Alert\":\n\n data_source_id = alert_info.data_source\n data_source_obj = DataSource.get_by_id(data_source_id)\n static_alert_obj = StaticEventAlertController(\n alert_info.as_dict, data_source_obj.as_dict\n )\n static_alert_obj.check_and_prepare_alert()\n elif (\n alert_info.alert_type == \"KPI Alert\" and alert_info.kpi_alert_type == \"Anomaly\"\n ):\n anomaly_obj = AnomalyAlertController(alert_info.as_dict)\n return anomaly_obj.check_and_prepare_alert()\n elif alert_info.alert_type == \"KPI Alert\" and alert_info.kpi_alert_type == \"Static\":\n static_kpi_alert = StaticKpiAlertController(alert_info.as_dict)\n\n return True\n\n\ndef trigger_anomaly_alerts_for_kpi(\n kpi_obj: Kpi, end_date: date\n) -> Tuple[List[int], List[int]]:\n \"\"\"Triggers anomaly alerts starting from end_date.\n\n Args:\n kpi_obj (Kpi): Object of kpi for which alerts are to be triggered\n end_date (dateimte.datetime): Datetime object containing the upper bound of anomaly date values\n\n Returns:\n List[int]: List of alert IDs for which alert messages were successfully sent\n List[int]: List of alert IDs for which alert failed\n \"\"\"\n success_alerts = []\n errors = []\n alerts = Alert.query.filter(\n Alert.kpi == kpi_obj.id, Alert.active == True, Alert.alert_status == True\n ).all()\n for alert in alerts:\n try:\n anomaly_obj = AnomalyAlertController(\n alert.as_dict, anomaly_end_date=end_date\n )\n anomaly_obj.check_and_prepare_alert()\n success_alerts.append(alert.id)\n except Exception as e:\n logger.error(f\"Error running alert for Alert ID: {alert.id}\", exc_info=e)\n errors.append(alert.id)\n return success_alerts, errors\n"
] |
[
[
"pandas.concat",
"numpy.isinf",
"numpy.isnan",
"pandas.DataFrame",
"pandas.isna",
"matplotlib.pyplot.show",
"pandas.qcut"
],
[
"pandas.concat",
"pandas.read_pickle",
"pandas.DataFrame"
]
] |
modelearth/useeio_api
|
[
"2a4cd9776be69afd2560fff733a4e6cc31dad8cc"
] |
[
"python/useeioapi/data.py"
] |
[
"import csv\nimport json\nimport os\nimport struct\n\nimport numpy\n\n\ndef read_sectors(folder: str, model_id: str) -> list:\n path = folder + '/' + model_id + '/sectors.csv'\n sectors = []\n for row in read_csv(path):\n sectors.append({\n 'index': int(row[0]),\n 'id': row[1],\n 'name': row[2],\n 'code': row[3],\n 'location': row[4],\n 'description': row[5],\n })\n sectors.sort(key=lambda s: s['index'])\n return sectors\n\n\ndef read_flows(folder: str, model_id: str) -> list:\n path = folder + '/' + model_id + '/flows.csv'\n flows = []\n for row in read_csv(path):\n flows.append({\n 'index': int(row[0]),\n 'id': row[1],\n 'name': row[2],\n 'category': row[3],\n 'subCategory': row[4],\n 'unit': row[5],\n 'uuid': row[6],\n })\n flows.sort(key=lambda s: s['index'])\n return flows\n\n\ndef read_indicators(folder: str, model_id: str):\n path = folder + '/' + model_id + '/indicators.csv'\n indicators = []\n for row in read_csv(path):\n indicators.append({\n 'index': int(row[0]),\n 'id': row[3],\n 'name': row[2],\n 'code': row[3],\n 'unit': row[4],\n 'group': row[5]\n })\n indicators.sort(key=lambda s: s['index'])\n return indicators\n\n\ndef read_model_infos(data_folder: str):\n infos = []\n for row in read_csv(data_folder + '/models.csv'):\n infos.append({\n 'id': row[0],\n 'name': row[1],\n 'location': row[2],\n 'description': row[3]\n })\n return infos\n\n\ndef read_demand_infos(data_folder: str, model_id: str):\n path = data_folder + '/' + model_id + '/demands.csv'\n infos = []\n for row in read_csv(path):\n infos.append({\n 'id': row[0],\n 'year': int(row[1]),\n 'type': row[2],\n 'system': row[3],\n 'location': row[4],\n })\n return infos\n\n\ndef read_demand(data_folder: str, model_id: str, demand_id: str):\n path = os.path.join(data_folder, model_id, 'demands', demand_id + '.json')\n if not os.path.isfile(path):\n return None\n with open(path, 'r', encoding='utf-8') as f:\n return json.load(f)\n\n\ndef read_csv(path, skip_header=True) -> list:\n with open(path, 'r', encoding='utf-8', newline='\\n') as f:\n r = csv.reader(f)\n if skip_header:\n next(r)\n for row in r:\n yield row\n\n\ndef read_matrix(data_folder: str, model_id: str, name: str):\n path = data_folder + '/' + model_id + '/' + name + '.bin'\n if not os.path.isfile(path):\n return None\n shape = _read_matrix_shape(path)\n return numpy.memmap(path, mode='c', dtype='<f8',\n shape=shape, offset=8, order='F')\n\n\ndef read_dqi_matrix(data_folder: str, model_id: str, name: str):\n path = data_folder + '/' + model_id + '/' + name + '.csv'\n if not os.path.isfile(path):\n return None\n dqi = []\n for row in read_csv(path, skip_header=False):\n dqi.append(row)\n return dqi\n\n\ndef _read_matrix_shape(file_path: str):\n \"\"\" Reads and returns the shape (rows, columns) from the matrix stored in\n the given file.\n \"\"\"\n with open(file_path, 'rb') as f:\n rows = struct.unpack('<i', f.read(4))[0]\n cols = struct.unpack('<i', f.read(4))[0]\n return rows, cols\n"
] |
[
[
"numpy.memmap"
]
] |
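The only API listed for `python/useeioapi/data.py` is `numpy.memmap`: `read_matrix` maps a `.bin` file whose first 8 bytes are two little-endian int32 values (rows, cols) followed by float64 data in column-major order, which is why it passes `offset=8`, `dtype='<f8'` and `order='F'`. A small round-trip sketch under that assumed layout; the writer below is illustrative only, not part of the repository.

```python
# Assumed layout, matching _read_matrix_shape/read_matrix in the row above:
# <i4 rows, <i4 cols, then float64 values in Fortran (column-major) order.
import struct
import numpy as np


def write_matrix(path: str, a: np.ndarray) -> None:
    rows, cols = a.shape
    with open(path, 'wb') as f:
        f.write(struct.pack('<i', rows))
        f.write(struct.pack('<i', cols))
        f.write(np.asarray(a, dtype='<f8').flatten(order='F').tobytes())


a = np.arange(6, dtype=float).reshape(2, 3)
write_matrix('demo.bin', a)

# mode='c' is copy-on-write: reads come from the mapped file, writes stay in memory.
m = np.memmap('demo.bin', mode='c', dtype='<f8', shape=(2, 3), offset=8, order='F')
assert np.array_equal(np.asarray(m), a)
```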
matthoffman/degas
|
[
"878f10b40fa2efa008986823e0c7fba27df8a2d4"
] |
[
"degas/dataset.py"
] |
[
"# -*- coding: utf-8 -*-\nimport logging\nfrom typing import List\nfrom csv import QUOTE_ALL\nfrom pathlib import Path\nfrom glob import glob\nimport pandas as pd\n\n\n##########################################\n# some constants for use in other files\n##########################################\n\n# key for our dataframe that contains the data (the domains)\nDATA_KEY = \"domain\"\n# key for our dataframe that contains the labels (whether it's a DGA or not)\nLABEL_KEY = \"class\"\nDATASET_FILENAME = \"dataset.csv.gz\"\n\n\ndef process(input_filepath: str, output_filepath: str) -> None:\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n logger.info(\"Loading DGA datasets\")\n dgas = concat(\n [\n # good wordlist, but the code is GPL so by the default the data is as well (although it's compiled from a\n # variety of public-domain sources)\n # load_andrewaeva(join(input_filepath, \"andrewaeva-dga-list.csv.gz\")),\n load_bambenek(join(input_filepath, \"bambenek.*.csv.gz\")),\n load_subset(join(input_filepath, \"subset.csv.gz\")),\n load_other_dga(join(input_filepath, \"dga-*.csv.gz\")),\n ],\n dga=True,\n )\n\n logger.info(\"Loading benign domain datasets\")\n benign = concat(\n [\n load_cisco(join(input_filepath, \"cisco-umbrella-top-1m.csv.zip\")),\n load_majestic_million(join(input_filepath, \"majestic_million.csv.gz\")),\n load_alexa(join(input_filepath, \"alexa.csv.gz\")),\n load_top10million(join(input_filepath, \"top10milliondomains.csv.zip\")),\n ],\n dga=False,\n )\n\n logger.info(\"Loaded a total of %i DGA domains and %i benign domains\", len(dgas), len(benign))\n logger.info(\"There are %i *unique* benign domains\", len(benign.drop_duplicates()))\n full: pd.DataFrame = pd.concat([dgas, benign], ignore_index=True)\n logger.info(\"created a dataset of %i records (of which %.2f%% are DGAs)\", len(full), full[LABEL_KEY].mean() * 100)\n full.to_csv(join(output_filepath, DATASET_FILENAME), header=True, index=False, compression=\"gzip\")\n logger.info(\"dataset creation complete. dataset.csv.gz written to %s\", output_filepath)\n\n\ndef concat(dataframes: List[pd.DataFrame], dga=False) -> pd.DataFrame:\n \"\"\"\n Concatenate dataframes containing all DGAs or all benign domains and add a column indicating their label.\n \"\"\"\n df = pd.concat(dataframes, ignore_index=True)\n if dga:\n df[LABEL_KEY] = 1\n else:\n df[LABEL_KEY] = 0\n return df\n\n\ndef load_top10million(path: Path) -> pd.DataFrame:\n \"\"\"\n A list of the top 10 million domains according to the Open PageRank Initiative, based on Common Crawl data.\n Since these are actually crawled, we assume they are not DGAs.\n\n This file has a header, and looks like this:\n \"Rank\",\"Domain\",\"Open Page Rank\"\n \"1\",\"facebook.com\",\"10.00\"\n \"\"\"\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n df = pd.read_csv(path, quoting=QUOTE_ALL)\n df.drop(columns=[\"Rank\", \"Open Page Rank\"], inplace=True)\n df.rename(index=str, columns={\"Domain\": DATA_KEY}, inplace=True)\n logging.info(\" - read %i records from %s\", len(df), path)\n return df\n\n\ndef load_majestic_million(path: Path) -> pd.DataFrame:\n \"\"\"\n load the \"majestic million\" top 1 million website dataset. 
This has significant overlap with Alexa and Cisco's \"top\n 1 million domain\" datasets, obviously, but that's OK. No harm in that.\n\n This actually has a header row. First couple lines are:\n GlobalRank,TldRank,Domain,TLD,RefSubNets,RefIPs,IDN_Domain,IDN_TLD,PrevGlobalRank,PrevTldRank,PrevRefSubNets,PrevRefIPs\n1,1,google.com,com,487267,3086039,google.com,com,1,1,487043,3085865\n \"\"\"\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n df = pd.read_csv(path, names=[DATA_KEY], usecols=[2], skiprows=1)\n logging.info(\" - read %i records from %s\", len(df), path)\n return df\n\n\ndef load_other_dga(path: Path) -> pd.DataFrame:\n \"\"\" Load other files containing DGA domains from not-previously-known sources\n\n We'll load them by glob pattern, and expect that they are comma-separated with domains in the first column, with no\n header.\n We'll treat lines that start with '#' as comments\n \"\"\"\n dga = pd.DataFrame()\n for p in glob(str(path)):\n logging.info(\" - reading %s\", p)\n this_dga = pd.read_csv(p, header=None, comment=\"#\", names=[DATA_KEY], usecols=[0])\n logging.info(\" - read %i records from %s\", len(this_dga), path)\n dga = dga.append(this_dga, ignore_index=True, verify_integrity=True)\n dga.drop_duplicates(inplace=True)\n return dga\n\n\ndef load_subset(path: Path) -> pd.DataFrame:\n \"\"\" A small subset of the input data, for testing purposes.\n \"\"\"\n # chr(1) is ctrl-A, which is a pretty vile separator char, TBH. I mean, couldn't it at least be ctrl-^\n # (\"record separator\") or ctrl-_ (\"unit separator\")?\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n subset = pd.read_csv(\n path,\n delimiter=chr(1),\n names=[DATA_KEY, \"desc\", \"class\"],\n usecols=[0, 2],\n header=None,\n error_bad_lines=False,\n )\n logging.info(\" - read %i records from %s\", len(subset), path)\n return subset\n\n\ndef load_andrewaeva(path: Path) -> pd.DataFrame:\n \"\"\" This dataset is from andrewaeva (github.com:andrewaeva/DGA.git), where it is used as the training set of for a\n couple of DGA detection models.\n It's a simple CSV dataset without a header, with 2 columns: domain, and a number representing the algorithm that\n generated it.\n For our purposes, we don't care which algorithm generated the domain, so we'll just pull the first column.\n \"\"\"\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n dga1 = pd.read_csv(path, delimiter=\"\\\\s+\", header=None, names=[DATA_KEY, \"dga_type\"], usecols=[0])\n logging.info(\" - read %i records from %s\", len(dga1), path)\n return dga1\n\n\ndef load_bambenek(path: Path) -> pd.DataFrame:\n \"\"\" Bambenek consulting publishes a regular feed of DGA-generated URLs to [TODO: URL]\n\n It has a license info in a header, but not an actual header line.\n The actual file content looks like this:\n plvklpgwivery.com,Domain used by Cryptolocker - Flashback DGA for 23 Jun 2018,2018-06-23,http://osint.bambenekconsulting.com/manual/cl.txt\n\n There are several files, currently, so this handles glob-style wildcards in the incoming path\n \"\"\"\n dga = pd.DataFrame()\n for p in glob(str(path)):\n logging.info(\" - reading %s\", p)\n this_dga = pd.read_csv(p, header=None, comment=\"#\", names=[DATA_KEY], usecols=[0])\n logging.info(\" - read %i 
records from %s\", len(this_dga), path)\n dga = dga.append(this_dga, ignore_index=True, verify_integrity=True)\n dga.drop_duplicates(inplace=True)\n return dga\n\n\ndef load_cisco(path: Path) -> pd.DataFrame:\n \"\"\" Cisco publishes a \"top 1 million URLs\" dataset. Being popularity-based, we assume that none of these are DGAs.\n \"\"\"\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n benign = pd.read_csv(path, header=None, comment=\"#\", names=[\"rank\", DATA_KEY], usecols=[1])\n logging.info(\" - read %i records from %s\", len(benign), path)\n return benign\n\n\ndef load_alexa(path: Path) -> pd.DataFrame:\n \"\"\"\n Load the top 1 million websites according to Alexa.\n This is a space-separated file with 2 columns, domain and rank (I believe?). We only care about the domain.\n \"\"\"\n if not path.exists():\n logging.info(\"No file named '%s' found, skipping\", path)\n return pd.DataFrame()\n\n logging.info(\" - reading %s\", path)\n df = pd.read_csv(path, header=None, delimiter=\"\\\\s+\", names=[DATA_KEY], usecols=[0])\n logging.info(\" - read %i records from %s\", len(df), path)\n return df\n\n\ndef join(base: str, filename: str) -> Path:\n \"\"\"\n Yes, we could use os.path.join, but I wanted to deal in Path objects instead\n \"\"\"\n return Path(base).joinpath(filename)\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
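`degas/dataset.py` above builds its training set by concatenating one-column domain DataFrames from several loaders and attaching a single label column, which is exactly the `pandas.concat`/`pandas.DataFrame` usage recorded in the apis column. A minimal sketch of that concat-and-label step with made-up data; the names follow the DATA_KEY/LABEL_KEY constants in the row.

```python
# Toy data only; mirrors concat() + process() from degas/dataset.py above.
import pandas as pd

DATA_KEY, LABEL_KEY = "domain", "class"


def concat_and_label(frames, dga: bool) -> pd.DataFrame:
    df = pd.concat(frames, ignore_index=True)
    df[LABEL_KEY] = int(dga)       # 1 for DGA domains, 0 for benign
    return df


dgas = concat_and_label([pd.DataFrame({DATA_KEY: ["xjqkzv-example.com"]})], dga=True)
benign = concat_and_label([pd.DataFrame({DATA_KEY: ["example.com", "python.org"]})], dga=False)
full = pd.concat([dgas, benign], ignore_index=True)
print(full[LABEL_KEY].mean())      # fraction of DGA rows, as logged in process()
```

Note that the `DataFrame.append` calls in `load_bambenek`/`load_other_dga` were removed in pandas 2.0; collecting the per-file frames in a list and passing them to `pd.concat` is the drop-in replacement on newer pandas.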
cdchushig/smote_variants
|
[
"c6a0f57dc8b4002800a5a413879323aaba04e0dd"
] |
[
"smote_variants/_smote_variants.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 15 11:15:24 2018\n\n@author: gykovacs\n\"\"\"\n\n# import system packages\nimport os\nimport pickle\nimport itertools\nimport logging\nimport re\nimport time\nimport glob\nimport inspect\n\n# used to parallelize evaluation\nfrom joblib import Parallel, delayed\n\n# numerical methods and arrays\nimport numpy as np\nimport pandas as pd\n\n# import packages used for the implementation of sampling methods\nfrom sklearn.model_selection import (RepeatedStratifiedKFold, KFold,\n cross_val_score, StratifiedKFold)\nfrom sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.metrics import (log_loss, roc_auc_score, accuracy_score,\n confusion_matrix, f1_score)\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN\nfrom sklearn.manifold import LocallyLinearEmbedding, TSNE, Isomap\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.base import clone, BaseEstimator, ClassifierMixin\n\n# some statistical methods\nfrom scipy.stats import skew\nimport scipy.signal as ssignal\nimport scipy.spatial as sspatial\nimport scipy.optimize as soptimize\nimport scipy.special as sspecial\nfrom scipy.stats.mstats import gmean\n\nfrom ._version import __version__\n\n__author__ = \"György Kovács\"\n__license__ = \"MIT\"\n__email__ = \"[email protected]\"\n\n# for handler in _logger.root.handlers[:]:\n# _logger.root.removeHandler(handler)\n\n# setting the _logger format\n_logger = logging.getLogger('smote_variants')\n_logger.setLevel(logging.DEBUG)\n_logger_ch = logging.StreamHandler()\n_logger_ch.setFormatter(logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(message)s\"))\n_logger.addHandler(_logger_ch)\n\n# exported names\n__all__ = ['__author__',\n '__license__',\n '__version__',\n '__email__',\n 'get_all_oversamplers',\n 'get_all_noisefilters',\n 'get_n_quickest_oversamplers',\n 'get_all_oversamplers_multiclass',\n 'get_n_quickest_oversamplers_multiclass',\n 'evaluate_oversamplers',\n 'read_oversampling_results',\n 'model_selection',\n 'cross_validate',\n 'MLPClassifierWrapper',\n 'OverSampling',\n 'NoiseFilter',\n 'TomekLinkRemoval',\n 'CondensedNearestNeighbors',\n 'OneSidedSelection',\n 'CNNTomekLinks',\n 'NeighborhoodCleaningRule',\n 'EditedNearestNeighbors',\n 'SMOTE',\n 'SMOTE_TomekLinks',\n 'SMOTE_ENN',\n 'Borderline_SMOTE1',\n 'Borderline_SMOTE2',\n 'ADASYN',\n 'AHC',\n 'LLE_SMOTE',\n 'distance_SMOTE',\n 'SMMO',\n 'polynom_fit_SMOTE',\n 'Stefanowski',\n 'ADOMS',\n 'Safe_Level_SMOTE',\n 'MSMOTE',\n 'DE_oversampling',\n 'SMOBD',\n 'SUNDO',\n 'MSYN',\n 'SVM_balance',\n 'TRIM_SMOTE',\n 'SMOTE_RSB',\n 'ProWSyn',\n 'SL_graph_SMOTE',\n 'NRSBoundary_SMOTE',\n 'LVQ_SMOTE',\n 'SOI_CJ',\n 'ROSE',\n 'SMOTE_OUT',\n 'SMOTE_Cosine',\n 'Selected_SMOTE',\n 'LN_SMOTE',\n 'MWMOTE',\n 'PDFOS',\n 'IPADE_ID',\n 'RWO_sampling',\n 'NEATER',\n 'DEAGO',\n 'Gazzah',\n 
'MCT',\n 'ADG',\n 'SMOTE_IPF',\n 'KernelADASYN',\n 'MOT2LD',\n 'V_SYNTH',\n 'OUPS',\n 'SMOTE_D',\n 'SMOTE_PSO',\n 'CURE_SMOTE',\n 'SOMO',\n 'ISOMAP_Hybrid',\n 'CE_SMOTE',\n 'Edge_Det_SMOTE',\n 'CBSO',\n 'E_SMOTE',\n 'DBSMOTE',\n 'ASMOBD',\n 'Assembled_SMOTE',\n 'SDSMOTE',\n 'DSMOTE',\n 'G_SMOTE',\n 'NT_SMOTE',\n 'Lee',\n 'SPY',\n 'SMOTE_PSOBAT',\n 'MDO',\n 'Random_SMOTE',\n 'ISMOTE',\n 'VIS_RST',\n 'GASMOTE',\n 'A_SUWO',\n 'SMOTE_FRST_2T',\n 'AND_SMOTE',\n 'NRAS',\n 'AMSCO',\n 'SSO',\n 'NDO_sampling',\n 'DSRBF',\n 'Gaussian_SMOTE',\n 'kmeans_SMOTE',\n 'Supervised_SMOTE',\n 'SN_SMOTE',\n 'CCR',\n 'ANS',\n 'cluster_SMOTE',\n 'NoSMOTE',\n 'MulticlassOversampling',\n 'OversamplingClassifier']\n\n\ndef get_all_oversamplers():\n \"\"\"\n Returns all oversampling classes\n\n Returns:\n list(OverSampling): list of all oversampling classes\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_all_oversamplers()\n \"\"\"\n\n return OverSampling.__subclasses__()\n\n\ndef get_n_quickest_oversamplers(n=10):\n \"\"\"\n Returns the n quickest oversamplers based on testing on the datasets of\n the imbalanced_databases package.\n\n Args:\n n (int): number of oversamplers to return\n\n Returns:\n list(OverSampling): list of the n quickest oversampling classes\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_n_quickest_oversamplers(10)\n \"\"\"\n\n runtimes = {'SPY': 0.11, 'OUPS': 0.16, 'SMOTE_D': 0.20, 'NT_SMOTE': 0.20,\n 'Gazzah': 0.21, 'ROSE': 0.25, 'NDO_sampling': 0.27,\n 'Borderline_SMOTE1': 0.28, 'SMOTE': 0.28,\n 'Borderline_SMOTE2': 0.29, 'ISMOTE': 0.30, 'SMMO': 0.31,\n 'SMOTE_OUT': 0.37, 'SN_SMOTE': 0.44, 'Selected_SMOTE': 0.47,\n 'distance_SMOTE': 0.47, 'Gaussian_SMOTE': 0.48, 'MCT': 0.51,\n 'Random_SMOTE': 0.57, 'ADASYN': 0.58, 'SL_graph_SMOTE': 0.58,\n 'CURE_SMOTE': 0.59, 'ANS': 0.63, 'MSMOTE': 0.72,\n 'Safe_Level_SMOTE': 0.79, 'SMOBD': 0.80, 'CBSO': 0.81,\n 'Assembled_SMOTE': 0.82, 'SDSMOTE': 0.88,\n 'SMOTE_TomekLinks': 0.91, 'Edge_Det_SMOTE': 0.94,\n 'ProWSyn': 1.00, 'Stefanowski': 1.04, 'NRAS': 1.06,\n 'AND_SMOTE': 1.13, 'DBSMOTE': 1.17, 'polynom_fit_SMOTE': 1.18,\n 'ASMOBD': 1.18, 'MDO': 1.18, 'SOI_CJ': 1.24, 'LN_SMOTE': 1.26,\n 'VIS_RST': 1.34, 'TRIM_SMOTE': 1.36, 'LLE_SMOTE': 1.62,\n 'SMOTE_ENN': 1.86, 'SMOTE_Cosine': 2.00, 'kmeans_SMOTE': 2.43,\n 'MWMOTE': 2.45, 'V_SYNTH': 2.59, 'A_SUWO': 2.81,\n 'RWO_sampling': 2.91, 'SMOTE_RSB': 3.88, 'ADOMS': 3.89,\n 'SMOTE_IPF': 4.10, 'Lee': 4.16, 'SMOTE_FRST_2T': 4.18,\n 'cluster_SMOTE': 4.19, 'SOMO': 4.30, 'DE_oversampling': 4.67,\n 'CCR': 4.72, 'NRSBoundary_SMOTE': 5.26, 'AHC': 5.27,\n 'ISOMAP_Hybrid': 6.11, 'LVQ_SMOTE': 6.99, 'CE_SMOTE': 7.45,\n 'MSYN': 11.92, 'PDFOS': 15.14, 'KernelADASYN': 17.87,\n 'G_SMOTE': 19.23, 'E_SMOTE': 19.50, 'SVM_balance': 24.05,\n 'SUNDO': 26.21, 'GASMOTE': 31.38, 'DEAGO': 33.39,\n 'NEATER': 41.39, 'SMOTE_PSO': 45.12, 'IPADE_ID': 90.01,\n 'DSMOTE': 146.73, 'MOT2LD': 149.42, 'Supervised_SMOTE': 195.74,\n 'SSO': 215.27, 'DSRBF': 272.11, 'SMOTE_PSOBAT': 324.31,\n 'ADG': 493.64, 'AMSCO': 1502.36}\n\n samplers = get_all_oversamplers()\n samplers = sorted(\n samplers, key=lambda x: runtimes.get(x.__name__, 1e8))\n\n return samplers[:n]\n\n\ndef get_all_oversamplers_multiclass(strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Returns all oversampling classes which can be used with the multiclass\n strategy specified\n\n Args:\n strategy (str): the multiclass oversampling strategy -\n 'eq_1_vs_many_successive'/'equalize_1_vs_many'\n\n Returns:\n list(OverSampling): list of all oversampling 
classes which can be used\n with the multiclass strategy specified\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_all_oversamplers_multiclass()\n \"\"\"\n\n oversamplers = get_all_oversamplers()\n\n if (strategy == 'eq_1_vs_many_successive' or\n strategy == 'equalize_1_vs_many'):\n\n def multiclass_filter(o):\n return ((OverSampling.cat_changes_majority not in o.categories) or\n ('proportion' in o().get_params()))\n\n return [o for o in oversamplers if multiclass_filter(o)]\n else:\n raise ValueError((\"It is not known which oversamplers work with the\"\n \" strategy %s\") % strategy)\n\n\ndef get_n_quickest_oversamplers_multiclass(n,\n strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Returns the n quickest oversamplers based on testing on the datasets of\n the imbalanced_databases package, and suitable for using the multiclass\n strategy specified.\n\n Args:\n n (int): number of oversamplers to return\n strategy (str): the multiclass oversampling strategy -\n 'eq_1_vs_many_successive'/'equalize_1_vs_many'\n\n Returns:\n list(OverSampling): list of n quickest oversampling classes which can\n be used with the multiclass strategy specified\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_n_quickest_oversamplers_multiclass()\n \"\"\"\n\n oversamplers = get_all_oversamplers()\n quickest_oversamplers = get_n_quickest_oversamplers(len(oversamplers))\n\n if (strategy == 'eq_1_vs_many_successive'\n or strategy == 'equalize_1_vs_many'):\n\n def multiclass_filter(o):\n return ((OverSampling.cat_changes_majority not in o.categories) or\n ('proportion' in o().get_params()))\n\n return [o for o in quickest_oversamplers if multiclass_filter(o)][:n]\n else:\n raise ValueError(\"It is not known which oversamplers work with the\"\n \" strategy %s\" % strategy)\n\n\ndef get_all_noisefilters():\n \"\"\"\n Returns all noise filters\n Returns:\n list(NoiseFilter): list of all noise filter classes\n \"\"\"\n return NoiseFilter.__subclasses__()\n\n\ndef mode(data):\n values, counts = np.unique(data, return_counts=True)\n return values[np.where(counts == max(counts))[0][0]]\n\n\nclass StatisticsMixin:\n \"\"\"\n Mixin to compute class statistics and determine minority/majority labels\n \"\"\"\n\n def class_label_statistics(self, X, y):\n \"\"\"\n determines class sizes and minority and majority labels\n Args:\n X (np.array): features\n y (np.array): target labels\n \"\"\"\n unique, counts = np.unique(y, return_counts=True)\n self.class_stats = dict(zip(unique, counts))\n self.min_label = unique[0] if counts[0] < counts[1] else unique[1]\n self.maj_label = unique[1] if counts[0] < counts[1] else unique[0]\n # shorthands\n self.min_label = self.min_label\n self.maj_label = self.maj_label\n\n def check_enough_min_samples_for_sampling(self, threshold=2):\n if self.class_stats[self.min_label] < threshold:\n m = (\"The number of minority samples (%d) is not enough \"\n \"for sampling\")\n m = m % self.class_stats[self.min_label]\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return False\n return True\n\n\nclass RandomStateMixin:\n \"\"\"\n Mixin to set random state\n \"\"\"\n\n def set_random_state(self, random_state):\n \"\"\"\n sets the random_state member of the object\n\n Args:\n random_state (int/np.random.RandomState/None): the random state\n initializer\n \"\"\"\n\n self._random_state_init = random_state\n\n if random_state is None:\n self.random_state = np.random\n elif isinstance(random_state, int):\n self.random_state = 
np.random.RandomState(random_state)\n elif isinstance(random_state, np.random.RandomState):\n self.random_state = random_state\n elif random_state is np.random:\n self.random_state = random_state\n else:\n raise ValueError(\n \"random state cannot be initialized by \" + str(random_state))\n\n\nclass ParameterCheckingMixin:\n \"\"\"\n Mixin to check if parameters come from a valid range\n \"\"\"\n\n def check_in_range(self, x, name, r):\n \"\"\"\n Check if parameter is in range\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n r (list-like(2)): the lower and upper bound of a range\n Throws:\n ValueError\n \"\"\"\n if x < r[0] or x > r[1]:\n m = (\"Value for parameter %s outside the range [%f,%f] not\"\n \" allowed: %f\")\n m = m % (name, r[0], r[1], x)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_out_range(self, x, name, r):\n \"\"\"\n Check if parameter is outside of range\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n r (list-like(2)): the lower and upper bound of a range\n Throws:\n ValueError\n \"\"\"\n if x >= r[0] and x <= r[1]:\n m = \"Value for parameter %s in the range [%f,%f] not allowed: %f\"\n m = m % (name, r[0], r[1], x)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_or_equal(self, x, name, val):\n \"\"\"\n Check if parameter is less than or equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x > val:\n m = \"Value for parameter %s greater than %f not allowed: %f > %f\"\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_or_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x > y:\n m = (\"Value for parameter %s greater than parameter %s not\"\n \" allowed: %f > %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less(self, x, name, val):\n \"\"\"\n Check if parameter is less than value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x >= val:\n m = (\"Value for parameter %s greater than or equal to %f\"\n \" not allowed: %f >= %f\")\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x >= y:\n m = (\"Value for parameter %s greater than or equal to parameter\"\n \" %s not allowed: %f >= %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_or_equal(self, x, name, val):\n \"\"\"\n Check if parameter is greater than or equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x < val:\n m = \"Value for parameter %s less than %f is not allowed: %f < %f\"\n m = m % (name, val, 
x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_or_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x < y:\n m = (\"Value for parameter %s less than parameter %s is not\"\n \" allowed: %f < %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater(self, x, name, val):\n \"\"\"\n Check if parameter is greater than value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x <= val:\n m = (\"Value for parameter %s less than or equal to %f not allowed\"\n \" %f < %f\")\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is greater than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x <= y:\n m = (\"Value for parameter %s less than or equal to parameter %s\"\n \" not allowed: %f <= %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_equal(self, x, name, val):\n \"\"\"\n Check if parameter is equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x == val:\n m = (\"Value for parameter %s equal to parameter %f is not allowed:\"\n \" %f == %f\")\n m = m % (name, val, x, val)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x == y:\n m = (\"Value for parameter %s equal to parameter %s is not \"\n \"allowed: %f == %f\")\n m = m % (name_x, name_y, x, y)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_isin(self, x, name, li):\n \"\"\"\n Check if parameter is in list\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n li (list): list to check if parameter is in it\n Throws:\n ValueError\n \"\"\"\n if x not in li:\n m = \"Value for parameter %s not in list %s is not allowed: %s\"\n m = m % (name, str(li), str(x))\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_n_jobs(self, x, name):\n \"\"\"\n Check n_jobs parameter\n Args:\n x (int/None): number of jobs\n name (str): the parameter name\n Throws:\n ValueError\n \"\"\"\n if not ((x is None)\n or (x is not None and isinstance(x, int) and not x == 0)):\n m = \"Value for parameter n_jobs is not allowed: %s\" % str(x)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n\nclass ParameterCombinationsMixin:\n \"\"\"\n Mixin to generate parameter combinations\n \"\"\"\n\n @classmethod\n def generate_parameter_combinations(cls, dictionary, raw):\n \"\"\"\n Generates reasonable paramter combinations\n Args:\n dictionary (dict): dictionary of paramter ranges\n 
num (int): maximum number of combinations to generate\n \"\"\"\n if raw:\n return dictionary\n keys = sorted(list(dictionary.keys()))\n values = [dictionary[k] for k in keys]\n combinations = [dict(zip(keys, p))\n for p in list(itertools.product(*values))]\n return combinations\n\n\nclass NoiseFilter(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin):\n \"\"\"\n Parent class of noise filtering methods\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n pass\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n Args:\n X (np.array): features\n y (np.array): target labels\n \"\"\"\n pass\n\n def get_params(self, deep=False):\n \"\"\"\n Return parameters\n\n Returns:\n dict: dictionary of parameters\n \"\"\"\n\n return {}\n\n def set_params(self, **params):\n \"\"\"\n Set parameters\n\n Args:\n params (dict): dictionary of parameters\n \"\"\"\n\n for key, value in params.items():\n setattr(self, key, value)\n\n return self\n\n\nclass TomekLinkRemoval(NoiseFilter):\n \"\"\"\n Tomek link removal\n\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, strategy='remove_majority', n_jobs=1):\n \"\"\"\n Constructor of the noise filter.\n\n Args:\n strategy (str): noise removal strategy:\n 'remove_majority'/'remove_both'\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_isin(strategy, 'strategy', [\n 'remove_majority', 'remove_both'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise from dataset\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: dataset after noise removal\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # using 2 neighbors because the first neighbor is the point itself\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n distances, indices = nn.fit(X).kneighbors(X)\n\n # identify links\n links = []\n for i in range(len(indices)):\n if indices[indices[i][1]][1] == i:\n if not y[indices[i][1]] == y[indices[indices[i][1]][1]]:\n links.append((i, indices[i][1]))\n\n # determine links to be removed\n to_remove = []\n for li in links:\n if self.strategy == 'remove_majority':\n if y[li[0]] == self.min_label:\n to_remove.append(li[1])\n else:\n to_remove.append(li[0])\n elif self.strategy == 'remove_both':\n to_remove.append(li[0])\n to_remove.append(li[1])\n else:\n m = 'No Tomek link strategy %s implemented' % self.strategy\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n to_remove = list(set(to_remove))\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n\nclass CondensedNearestNeighbors(NoiseFilter):\n \"\"\"\n Condensed nearest neighbors\n\n References:\n * BibTex::\n\n @ARTICLE{condensed_nn,\n author={Hart, P.},\n journal={IEEE Transactions on 
Information Theory},\n title={The condensed nearest neighbor rule (Corresp.)},\n year={1968},\n volume={14},\n number={3},\n pages={515-516},\n keywords={Pattern classification},\n doi={10.1109/TIT.1968.1054155},\n ISSN={0018-9448},\n month={May}}\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removing object\n\n Args:\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise from dataset\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: dataset after noise removal\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # Initial result set consists of all minority samples and 1 majority\n # sample\n\n X_maj = X[y == self.maj_label]\n X_hat = np.vstack([X[y == self.min_label], X_maj[0]])\n y_hat = np.hstack([np.repeat(self.min_label, len(X_hat)-1),\n [self.maj_label]])\n X_maj = X_maj[1:]\n\n # Adding misclassified majority elements repeatedly\n while True:\n knn = KNeighborsClassifier(n_neighbors=1, n_jobs=self.n_jobs)\n knn.fit(X_hat, y_hat)\n pred = knn.predict(X_maj)\n\n if np.all(pred == self.maj_label):\n break\n else:\n X_hat = np.vstack([X_hat, X_maj[pred != self.maj_label]])\n y_hat = np.hstack(\n [y_hat,\n np.repeat(self.maj_label, len(X_hat) - len(y_hat))])\n X_maj = np.delete(X_maj, np.where(\n pred != self.maj_label)[0], axis=0)\n if len(X_maj) == 0:\n break\n\n return X_hat, y_hat\n\n\nclass OneSidedSelection(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods\n for Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n t = TomekLinkRemoval(n_jobs=self.n_jobs)\n X0, y0 = t.remove_noise(X, y)\n cnn = CondensedNearestNeighbors(n_jobs=self.n_jobs)\n\n return cnn.remove_noise(X0, y0)\n\n\nclass CNNTomekLinks(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods\n for Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. 
Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n c = CondensedNearestNeighbors(n_jobs=self.n_jobs)\n X0, y0 = c.remove_noise(X, y)\n t = TomekLinkRemoval(n_jobs=self.n_jobs)\n\n return t.remove_noise(X0, y0)\n\n\nclass NeighborhoodCleaningRule(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # fitting nearest neighbors with proposed parameter\n # using 4 neighbors because the first neighbor is the point itself\n nn = NearestNeighbors(n_neighbors=4, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n # identifying the samples to be removed\n to_remove = []\n for i in range(len(X)):\n if (y[i] == self.maj_label and\n mode(y[indices[i][1:]]) == self.min_label):\n # if sample i is majority and the decision based on\n # neighbors is minority\n to_remove.append(i)\n elif (y[i] == self.min_label and\n mode(y[indices[i][1:]]) == self.maj_label):\n # if sample i is minority and the decision based on\n # neighbors is majority\n for j in indices[i][1:]:\n if y[j] == self.maj_label:\n to_remove.append(j)\n\n # removing the noisy samples and returning the results\n to_remove = list(set(to_remove))\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n\nclass EditedNearestNeighbors(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. 
and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, remove='both', n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n remove (str): class to remove from 'both'/'min'/'maj'\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_isin(remove, 'remove', ['both', 'min', 'maj'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.remove = remove\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n if len(X) < 4:\n _logger.info(self.__class__.__name__ + ': ' +\n \"Not enough samples for noise removal\")\n return X.copy(), y.copy()\n\n nn = NearestNeighbors(n_neighbors=4, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n to_remove = []\n for i in range(len(X)):\n if not y[i] == mode(y[indices[i][1:]]):\n if (self.remove == 'both' or\n (self.remove == 'min' and y[i] == self.min_label) or\n (self.remove == 'maj' and y[i] == self.maj_label)):\n to_remove.append(i)\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n def get_params(self):\n \"\"\"\n Get noise removal parameters\n\n Returns:\n dict: dictionary of parameters\n \"\"\"\n return {'remove': self.remove}\n\n\nclass OverSampling(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin,\n RandomStateMixin):\n \"\"\"\n Base class of oversampling methods\n \"\"\"\n\n categories = []\n\n cat_noise_removal = 'NR'\n cat_dim_reduction = 'DR'\n cat_uses_classifier = 'Clas'\n cat_sample_componentwise = 'SCmp'\n cat_sample_ordinary = 'SO'\n cat_sample_copy = 'SCpy'\n cat_memetic = 'M'\n cat_density_estimation = 'DE'\n cat_density_based = 'DB'\n cat_extensive = 'Ex'\n cat_changes_majority = 'CM'\n cat_uses_clustering = 'Clus'\n cat_borderline = 'BL'\n cat_application = 'A'\n\n def __init__(self):\n pass\n\n def det_n_to_sample(self, strategy, n_maj, n_min):\n \"\"\"\n Determines the number of samples to generate\n Args:\n strategy (str/float): if float, the fraction of the difference\n of the minority and majority numbers to\n generate, like 0.1 means that 10% of the\n difference will be generated if str,\n like 'min2maj', the minority class will\n be upsampled to match the cardinality\n of the majority class\n \"\"\"\n if isinstance(strategy, float) or isinstance(strategy, int):\n return max([0, int((n_maj - n_min)*strategy)])\n else:\n m = \"Value %s for parameter strategy is not supported\" % strategy\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def sample_between_points(self, x, y):\n \"\"\"\n Sample randomly along the line between two points.\n Args:\n x (np.array): point 1\n y (np.array): point 2\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (y - x)*self.random_state.random_sample()\n\n def sample_between_points_componentwise(self, 
x, y, mask=None):\n \"\"\"\n Sample each dimension separately between the two points.\n Args:\n x (np.array): point 1\n y (np.array): point 2\n mask (np.array): array of 0,1s - specifies which dimensions\n to sample\n Returns:\n np.array: the new sample being generated\n \"\"\"\n if mask is None:\n return x + (y - x)*self.random_state.random_sample()\n else:\n return x + (y - x)*self.random_state.random_sample()*mask\n\n def sample_by_jittering(self, x, std):\n \"\"\"\n Sample by jittering.\n Args:\n x (np.array): base point\n std (float): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (self.random_state.random_sample() - 0.5)*2.0*std\n\n def sample_by_jittering_componentwise(self, x, std):\n \"\"\"\n Sample by jittering componentwise.\n Args:\n x (np.array): base point\n std (np.array): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (self.random_state.random_sample(len(x))-0.5)*2.0 * std\n\n def sample_by_gaussian_jittering(self, x, std):\n \"\"\"\n Sample by Gaussian jittering\n Args:\n x (np.array): base point\n std (np.array): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return self.random_state.normal(x, std)\n\n def sample(self, X, y):\n \"\"\"\n The samplig function reimplemented in child classes\n Args:\n X (np.matrix): features\n y (np.array): labels\n Returns:\n np.matrix, np.array: sampled X and y\n \"\"\"\n return X, y\n\n def fit_resample(self, X, y):\n \"\"\"\n Alias of the function \"sample\" for compatibility with imbalanced-learn\n pipelines\n \"\"\"\n return self.sample(X, y)\n\n def sample_with_timing(self, X, y):\n begin = time.time()\n X_samp, y_samp = self.sample(X, y)\n _logger.info(self.__class__.__name__ + \": \" +\n (\"runtime: %f\" % (time.time() - begin)))\n return X_samp, y_samp\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transforms new data according to the possible transformation\n implemented by the function \"sample\".\n Args:\n X (np.matrix): features\n Returns:\n np.matrix: transformed features\n \"\"\"\n return X\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the object as a dictionary.\n Returns:\n dict: the parameters of the object\n \"\"\"\n pass\n\n def set_params(self, **params):\n \"\"\"\n Set parameters\n\n Args:\n params (dict): dictionary of parameters\n \"\"\"\n\n for key, value in params.items():\n setattr(self, key, value)\n\n return self\n\n def descriptor(self):\n \"\"\"\n Returns:\n str: JSON description of the current sampling object\n \"\"\"\n return str((self.__class__.__name__, str(self.get_params())))\n\n def __str__(self):\n return self.descriptor()\n\n\nclass UnderSampling(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin):\n \"\"\"\n Base class of undersampling approaches.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructorm\n \"\"\"\n super().__init__()\n\n def sample(self, X, y):\n \"\"\"\n Carry out undersampling\n Args:\n X (np.matrix): features\n y (np.array): labels\n Returns:\n np.matrix, np.array: sampled X and y\n \"\"\"\n pass\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the object as a dictionary.\n Returns:\n dict: the parameters of the object\n \"\"\"\n pass\n\n def descriptor(self):\n \"\"\"\n Returns:\n str: JSON description of the current sampling object\n \"\"\"\n return str((self.__class__.__name__, str(self.get_params())))\n\n\nclass NoSMOTE(OverSampling):\n \"\"\"\n The goal of this class is to provide a functionality to send data 
through\n on any model selection/evaluation pipeline with no oversampling carried\n out. It can be used to get baseline estimates on preformance.\n \"\"\"\n\n categories = []\n\n def __init__(self, random_state=None):\n \"\"\"\n Constructor of the NoSMOTE object.\n\n Args:\n random_state (int/np.random.RandomState/None): dummy parameter for \\\n the compatibility of interfaces\n \"\"\"\n super().__init__()\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({}, raw=False)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {}\n\n\nclass SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote,\n author={Chawla, N. V. and Bowyer, K. W. and Hall, L. O. and\n Kegelmeyer, W. P.},\n title={{SMOTE}: synthetic minority over-sampling technique},\n journal={Journal of Artificial Intelligence Research},\n volume={16},\n year={2002},\n pages={321--357}\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 
1.0\n means that after sampling the number of minority samples will\n be equal to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n # _logger.warning(self.__class__.__name__ +\n # \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the model\n n_neigh = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if n_to_sample == 0:\n return X.copy(), y.copy()\n\n # generating samples\n base_indices = self.random_state.choice(list(range(len(X_min))),\n n_to_sample)\n neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),\n n_to_sample)\n\n X_base = X_min[base_indices]\n X_neighbor = X_min[ind[base_indices, neighbor_indices]]\n\n samples = X_base + np.multiply(self.random_state.rand(n_to_sample,\n 1),\n X_neighbor - X_base)\n\n x_syn = samples.copy()\n y_syn = np.hstack([self.min_label]*n_to_sample)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*n_to_sample)])), X, y, x_syn, y_syn\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_TomekLinks(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_tomeklinks_enn,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. 
Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA},\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return SMOTE.parameter_combinations(raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n smote = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_new, y_new = smote.sample(X, y)\n\n t = TomekLinkRemoval(strategy='remove_both', n_jobs=self.n_jobs)\n\n X_samp, y_samp = t.remove_noise(X_new, y_new)\n\n if len(X_samp) == 0:\n m = (\"All samples have been removed, \"\n \"returning the original dataset.\")\n _logger.info(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n return X_samp, y_samp\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_ENN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_tomeklinks_enn,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. 
Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA},\n }\n\n Notes:\n * Can remove too many of minority samples.\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return SMOTE.parameter_combinations(raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n smote = SMOTE(self.proportion, self.n_neighbors,\n n_jobs=self.n_jobs, random_state=self.random_state)\n X_new, y_new = smote.sample(X, y)\n\n enn = EditedNearestNeighbors(n_jobs=self.n_jobs)\n\n return enn.remove_noise(X_new, y_new)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Borderline_SMOTE1(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{borderlineSMOTE,\n author=\"Han, Hui\n and Wang, Wen-Yuan\n and Mao, Bing-Huan\",\n editor=\"Huang, De-Shuang\n and Zhang, Xiao-Ping\n and Huang, Guang-Bin\",\n title=\"Borderline-SMOTE: A New Over-Sampling Method\n in Imbalanced Data Sets Learning\",\n booktitle=\"Advances in Intelligent Computing\",\n year=\"2005\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"878--887\",\n isbn=\"978-3-540-31902-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n k_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique for determining the borderline\n k_neighbors (int): control parameter of the nearest neighbor\n technique for sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(k_neighbors, 'k_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.k_neighbors = k_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'k_neighbors': [3, 5, 7]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining number of samples to be generated\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting model\n X_min = X[y == self.min_label]\n\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # determining minority samples in danger\n noise = []\n danger = []\n for i in range(len(indices)):\n if self.n_neighbors == sum(y[indices[i][1:]] == self.maj_label):\n noise.append(i)\n elif mode(y[indices[i][1:]]) == self.maj_label:\n danger.append(i)\n X_danger = X_min[danger]\n X_min = np.delete(X_min, np.array(noise).astype(int), axis=0)\n\n if len(X_danger) == 0:\n _logger.info(self.__class__.__name__ +\n \": \" + \"No samples in danger\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to minority samples\n k_neigh = min([len(X_min), self.k_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=k_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n # extracting neighbors of samples in danger\n distances, indices = nn.kneighbors(X_danger)\n\n # generating samples near points in danger\n base_indices = self.random_state.choice(list(range(len(X_danger))),\n n_to_sample)\n neighbor_indices = self.random_state.choice(list(range(1, k_neigh)),\n n_to_sample)\n\n X_base = X_danger[base_indices]\n X_neighbor = X_min[indices[base_indices, neighbor_indices]]\n\n samples = X_base + \\\n np.multiply(self.random_state.rand(\n n_to_sample, 1), X_neighbor - X_base)\n\n return (np.vstack([X, samples]),\n np.hstack([y, 
np.hstack([self.min_label]*n_to_sample)]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'k_neighbors': self.k_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Borderline_SMOTE2(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{borderlineSMOTE,\n author=\"Han, Hui\n and Wang, Wen-Yuan\n and Mao, Bing-Huan\",\n editor=\"Huang, De-Shuang\n and Zhang, Xiao-Ping\n and Huang, Guang-Bin\",\n title=\"Borderline-SMOTE: A New Over-Sampling\n Method in Imbalanced Data Sets Learning\",\n booktitle=\"Advances in Intelligent Computing\",\n year=\"2005\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"878--887\",\n isbn=\"978-3-540-31902-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n k_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique for determining the borderline\n k_neighbors (int): control parameter of the nearest neighbor\n technique for sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(k_neighbors, 'k_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.k_neighbors = k_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'k_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining number of samples to be generated\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model\n X_min = X[y == self.min_label]\n\n n_neighbors = min([self.n_neighbors+1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices 
= nn.kneighbors(X_min)\n\n # determining minority samples in danger\n noise = []\n danger = []\n for i in range(len(indices)):\n if self.n_neighbors == sum(y[indices[i][1:]] == self.maj_label):\n noise.append(i)\n elif mode(y[indices[i][1:]]) == self.maj_label:\n danger.append(i)\n X_danger = X_min[danger]\n X_min = np.delete(X_min, np.array(noise).astype(int), axis=0)\n\n if len(X_min) < 2:\n m = (\"The number of minority samples after preprocessing (%d) is \"\n \"not enough for sampling\")\n m = m % (len(X_min))\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n if len(X_danger) == 0:\n m = \"No samples in danger\"\n _logger.info(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to minority samples\n k_neigh = self.k_neighbors + 1\n k_neigh = min([k_neigh, len(X)])\n nn = NearestNeighbors(n_neighbors=k_neigh, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_danger)\n\n # generating the samples\n base_indices = self.random_state.choice(\n list(range(len(X_danger))), n_to_sample)\n neighbor_indices = self.random_state.choice(\n list(range(1, k_neigh)), n_to_sample)\n\n X_base = X_danger[base_indices]\n X_neighbor = X[indices[base_indices, neighbor_indices]]\n diff = X_neighbor - X_base\n r = self.random_state.rand(n_to_sample, 1)\n mask = y[neighbor_indices] == self.maj_label\n r[mask] = r[mask]*0.5\n\n samples = X_base + np.multiply(r, diff)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'k_neighbors': self.k_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ADASYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{adasyn,\n author={He, H. and Bai, Y. and Garcia,\n E. A. 
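# --- Editorial sketch (not part of the original module) ---------------------
# Both Borderline-SMOTE variants above first grade every minority point by the
# labels of its n_neighbors nearest neighbours: an all-majority neighbourhood
# marks the point as noise, a majority-dominated one puts it "in danger", and
# only the danger points seed synthetic samples.  Variant 2 additionally
# halves the interpolation step whenever the selected neighbour belongs to the
# majority class (the r[mask] = r[mask]*0.5 line above), keeping those samples
# closer to the minority seed.  A hypothetical, simplified restatement of the
# grading rule for binary labels:

def _borderline_grade(neighbour_labels, maj_label, k):
    """Return 'noise', 'danger' or 'safe' for one minority point."""
    n_maj = sum(1 for label in neighbour_labels if label == maj_label)
    if n_maj == k:
        return 'noise'       # every neighbour is majority: treated as noise
    if n_maj > k / 2:
        return 'danger'      # majority-dominated neighbourhood: borderline seed
    return 'safe'            # safe points do not seed samples in either variant
# -----------------------------------------------------------------------------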
and Li, S.},\n title={{ADASYN}: adaptive synthetic sampling\n approach for imbalanced learning},\n booktitle={Proceedings of IJCNN},\n year={2008},\n pages={1322--1328}\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_density_based]\n\n def __init__(self,\n n_neighbors=5,\n d_th=0.9,\n beta=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): control parameter of the nearest neighbor\n component\n d_th (float): tolerated deviation level from balancedness\n beta (float): target level of balancedness, same as proportion\n in other techniques\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(d_th, 'd_th', 0)\n self.check_greater_or_equal(beta, 'beta', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.d_th = d_th\n self.beta = beta\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7, 9],\n 'd_th': [0.9],\n 'beta': [1.0, 0.75, 0.5, 0.25]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # checking if sampling is needed\n m_min = len(X_min)\n m_maj = len(X) - m_min\n\n n_to_sample = (m_maj - m_min)*self.beta\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = float(m_min)/m_maj\n if d > self.d_th:\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to all samples\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # determining the distribution of points to be generated\n r = []\n for i in range(len(indices)):\n r.append(sum(y[indices[i][1:]] ==\n self.maj_label)/self.n_neighbors)\n r = np.array(r)\n if sum(r) > 0:\n r = r/sum(r)\n\n if any(np.isnan(r)) or sum(r) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"not enough non-noise samples for oversampling\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors models to minority samples\n n_neigh = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X_min)\n\n # sampling points\n base_indices = self.random_state.choice(\n list(range(len(X_min))), size=int(n_to_sample), p=r)\n neighbor_indices = self.random_state.choice(\n list(range(1, n_neigh)), int(n_to_sample))\n\n X_base = 
X_min[base_indices]\n X_neighbor = X_min[indices[base_indices, neighbor_indices]]\n diff = X_neighbor - X_base\n r = self.random_state.rand(int(n_to_sample), 1)\n\n samples = X_base + np.multiply(r, diff)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*int(n_to_sample))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'd_th': self.d_th,\n 'beta': self.beta,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AHC(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{AHC,\n title = \"Learning from imbalanced data in surveillance\n of nosocomial infection\",\n journal = \"Artificial Intelligence in Medicine\",\n volume = \"37\",\n number = \"1\",\n pages = \"7 - 18\",\n year = \"2006\",\n note = \"Intelligent Data Analysis in Medicine\",\n issn = \"0933-3657\",\n doi = \"https://doi.org/10.1016/j.artmed.2005.03.002\",\n url = {http://www.sciencedirect.com/science/article/\n pii/S0933365705000850},\n author = \"Gilles Cohen and Mélanie Hilario and Hugo Sax\n and Stéphane Hugonnet and Antoine Geissbuhler\",\n keywords = \"Nosocomial infection, Machine learning,\n Support vector machines, Data imbalance\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_application]\n\n def __init__(self, strategy='min', n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (str): which class to sample (min/maj/minmaj)\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_isin(strategy, 'strategy', ['min', 'maj', 'minmaj'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'strategy': ['min', 'maj', 'minmaj']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample_majority(self, X, n_clusters):\n \"\"\"\n Sample the majority class\n\n Args:\n X (np.ndarray): majority samples\n n_clusters (int): number of clusters to find\n\n Returns:\n np.ndarray: downsampled vectors\n \"\"\"\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n return kmeans.cluster_centers_\n\n def sample_minority(self, X):\n \"\"\"\n Sampling the minority class\n\n Args:\n X (np.ndarray): minority samples\n\n Returns:\n np.ndarray: the oversampled set of vectors\n \"\"\"\n ac = AgglomerativeClustering(n_clusters=1)\n ac.fit(X)\n n_samples = len(X)\n\n cc = [None]*len(ac.children_)\n weights = [None]*len(ac.children_)\n\n def cluster_centers(children, i, cc, weights):\n \"\"\"\n Extract cluster centers\n\n Args:\n children (np.array): indices of children\n i (int): index to process\n cc (np.array): cluster centers\n weights (np.array): cluster weights\n\n Returns:\n int, float: new cluster center, new weight\n \"\"\"\n if i < n_samples:\n return X[i], 1.0\n\n if cc[i - n_samples] is None:\n a, w_a = cluster_centers(\n children, children[i - n_samples][0], cc, weights)\n b, w_b = cluster_centers(\n children, children[i - n_samples][1], cc, weights)\n cc[i - 
n_samples] = (w_a*a + w_b*b)/(w_a + w_b)\n weights[i - n_samples] = w_a + w_b\n\n return cc[i - n_samples], weights[i - n_samples]\n\n cluster_centers(ac.children_, ac.children_[-1][-1] + 1, cc, weights)\n\n return np.vstack(cc)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n if self.strategy == 'maj':\n X_maj_resampled = self.sample_majority(X_maj, len(X_min))\n return (np.vstack([X_min, X_maj_resampled]),\n np.hstack([np.repeat(self.min_label, len(X_min)),\n np.repeat(self.maj_label,\n len(X_maj_resampled))]))\n elif self.strategy == 'min':\n X_min_resampled = self.sample_minority(X_min)\n return (np.vstack([X_min_resampled, X_min, X_maj]),\n np.hstack([np.repeat(self.min_label,\n (len(X_min_resampled) + len(X_min))),\n np.repeat(self.maj_label, len(X_maj))]))\n elif self.strategy == 'minmaj':\n X_min_resampled = self.sample_minority(X_min)\n n_maj_sample = min([len(X_maj), len(X_min_resampled) + len(X_min)])\n X_maj_resampled = self.sample_majority(X_maj, n_maj_sample)\n return (np.vstack([X_min_resampled, X_min, X_maj_resampled]),\n np.hstack([np.repeat(self.min_label,\n (len(X_min_resampled) + len(X_min))),\n np.repeat(self.maj_label,\n len(X_maj_resampled))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'strategy': self.strategy,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LLE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{lle_smote,\n author={Wang, J. and Xu, M. and Wang,\n H. and Zhang, J.},\n booktitle={2006 8th international Conference\n on Signal Processing},\n title={Classification of Imbalanced Data by Using\n the SMOTE Algorithm and Locally Linear\n Embedding},\n year={2006},\n volume={3},\n number={},\n pages={},\n keywords={artificial intelligence;\n biomedical imaging;medical computing;\n imbalanced data classification;\n SMOTE algorithm;\n locally linear embedding;\n medical imaging intelligence;\n synthetic minority oversampling\n technique;\n high-dimensional data;\n low-dimensional space;\n Biomedical imaging;\n Back;Training data;\n Data mining;Biomedical engineering;\n Research and development;\n Electronic mail;Pattern recognition;\n Performance analysis;\n Classification algorithms},\n doi={10.1109/ICOSP.2006.345752},\n ISSN={2164-5221},\n month={Nov}}\n\n Notes:\n * There might be numerical issues if the nearest neighbors contain\n some element multiple times.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_components=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj\n and n_min to sample e.g. 
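# --- Editorial usage sketch (not part of the original module) ---------------
# AHC above oversamples the minority class with the weighted centroids of an
# agglomerative-clustering merge tree (one synthetic point per internal node)
# and can, depending on `strategy`, also replace the majority class by k-means
# cluster centres.  Hypothetical usage of the three strategies:

def _example_ahc_usage():
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=200, weights=[0.85, 0.15],
                               random_state=0)
    for strategy in ('min', 'maj', 'minmaj'):
        X_res, y_res = AHC(strategy=strategy, random_state=0).sample(X, y)
        print(strategy, X_res.shape, y_res.shape)
# -----------------------------------------------------------------------------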
1.0 means that after\n sampling the number of minority samples will\n be equal to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_components (int): dimensionality of the embedding space\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 2)\n self.check_greater_or_equal(n_components, 'n_components', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_components': [2, 3, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # do the locally linear embedding\n lle = LocallyLinearEmbedding(\n self.n_neighbors, self.n_components, n_jobs=self.n_jobs)\n try:\n lle.fit(X_min)\n except Exception as e:\n return X.copy(), y.copy()\n X_min_transformed = lle.transform(X_min)\n\n # fitting the nearest neighbors model for sampling\n n_neighbors = min([self.n_neighbors+1, len(X_min_transformed)])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min_transformed)\n dist, ind = nn.kneighbors(X_min_transformed)\n\n def solve_for_weights(xi, Z):\n \"\"\"\n Solve for locally linear embedding weights\n\n Args:\n xi (np.array): vector\n Z (np.matrix): matrix of neighbors in rows\n\n Returns:\n np.array: reconstruction weights\n\n Following https://cs.nyu.edu/~roweis/lle/algorithm.html\n \"\"\"\n Z = Z - xi\n Z = Z.T\n C = np.dot(Z.T, Z)\n try:\n w = np.linalg.solve(C, np.repeat(1.0, len(C)))\n if np.linalg.norm(w) > 1e8:\n w = np.repeat(1.0, len(C))\n except Exception as e:\n w = np.repeat(1.0, len(C))\n return w/np.sum(w)\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n random_coords = self.random_state.choice(ind[idx][1:])\n xi = self.sample_between_points(X_min_transformed[idx],\n X_min_transformed[random_coords])\n Z = X_min_transformed[ind[idx][1:]]\n w = solve_for_weights(xi, Z)\n samples.append(np.dot(w, X_min[ind[idx][1:]]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n 
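# --- Editorial sketch (not part of the original module) ---------------------
# LLE_SMOTE above synthesises a point in the low-dimensional embedding and
# maps it back by solving for locally-linear reconstruction weights over the
# embedded neighbours, then applying the same weights to the original-space
# neighbours.  A hedged restatement of solve_for_weights() above (following
# https://cs.nyu.edu/~roweis/lle/algorithm.html); `np` is the module-level
# numpy import, and the original additionally falls back to uniform weights
# when the solved weights blow up:

def _lle_reconstruction_weights(xi, Z):
    """Weights w (summing to 1) such that w @ Z approximates xi;
    Z holds the neighbours in its rows."""
    centred = Z - xi
    gram = np.dot(centred, centred.T)            # k x k Gram matrix
    try:
        w = np.linalg.solve(gram, np.ones(len(gram)))
    except np.linalg.LinAlgError:
        w = np.ones(len(gram))                   # singular system: uniform weights
    return w / np.sum(w)
# -----------------------------------------------------------------------------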
Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass distance_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{distance_smote,\n author={de la Calleja, J. and Fuentes, O.},\n booktitle={Proceedings of the Twentieth\n International Florida Artificial\n Intelligence},\n title={A distance-based over-sampling method\n for learning from imbalanced data sets},\n year={2007},\n volume={3},\n pages={634--635}\n }\n\n Notes:\n * It is not clear what the authors mean by \"weighted distance\".\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # fitting the model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n mean_vector = np.mean(X_min[ind[idx][1:]], axis=0)\n samples.append(self.sample_between_points(X_min[idx], mean_vector))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current 
sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMMO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{smmo,\n author = {de la Calleja, Jorge and Fuentes, Olac\n and González, Jesús},\n booktitle= {Proceedings of the Twenty-First\n International Florida Artificial\n Intelligence Research Society\n Conference},\n year = {2008},\n month = {01},\n pages = {276-281},\n title = {Selecting Minority Examples from\n Misclassified Data for Over-Sampling.}\n }\n\n Notes:\n * In this paper the ensemble is not specified. I have selected\n some very fast, basic classifiers.\n * Also, it is not clear what the authors mean by \"weighted distance\".\n * The original technique is not prepared for the case when no minority\n samples are classified correctly be the ensemble.\n \"\"\"\n\n categories = [OverSampling.cat_borderline,\n OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n ensemble=[QuadraticDiscriminantAnalysis(),\n DecisionTreeClassifier(random_state=2),\n GaussianNB()],\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n ensemble (list): list of classifiers, if None, default list of\n classifiers is used\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n try:\n len_ens = len(ensemble)\n except Exception as e:\n raise ValueError('The ensemble needs to be a list-like object')\n if len_ens == 0:\n raise ValueError('At least 1 classifier needs to be specified')\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.ensemble = ensemble\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n ensembles = [[QuadraticDiscriminantAnalysis(),\n DecisionTreeClassifier(random_state=2),\n GaussianNB()]]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'ensemble': ensembles}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return 
X.copy(), y.copy()\n\n # training and in-sample prediction (out-of-sample by k-fold cross\n # validation might be better)\n predictions = []\n for e in self.ensemble:\n predictions.append(e.fit(X, y).predict(X))\n\n # constructing ensemble prediction\n pred = np.where(np.sum(np.vstack(predictions), axis=0)\n > len(self.ensemble)/2, 1, 0)\n\n # create mask of minority samples to sample\n mask_to_sample = np.where(np.logical_and(np.logical_not(\n np.equal(pred, y)), y == self.min_label))[0]\n if len(mask_to_sample) < 2:\n m = \"Not enough minority samples selected %d\" % len(mask_to_sample)\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_min_to_sample = X[mask_to_sample]\n\n # fitting nearest neighbors model for sampling\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min_to_sample)\n\n # doing the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min_to_sample))\n mean = np.mean(X_min[ind[idx][1:]], axis=0)\n samples.append(self.sample_between_points(\n X_min_to_sample[idx], mean))\n\n return (np.vstack([X, np.vstack([samples])]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'ensemble': self.ensemble,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass polynom_fit_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{polynomial_fit_smote,\n author={Gazzah, S. and Amara, N. E. B.},\n booktitle={2008 The Eighth IAPR International\n Workshop on Document Analysis Systems},\n title={New Oversampling Approaches Based on\n Polynomial Fitting for Imbalanced Data\n Sets},\n year={2008},\n volume={},\n number={},\n pages={677-684},\n keywords={curve fitting;learning (artificial\n intelligence);mesh generation;pattern\n classification;polynomials;sampling\n methods;support vector machines;\n oversampling approach;polynomial\n fitting function;imbalanced data\n set;pattern classification task;\n class-modular strategy;support\n vector machine;true negative rate;\n true positive rate;star topology;\n bus topology;polynomial curve\n topology;mesh topology;Polynomials;\n Topology;Support vector machines;\n Support vector machine classification;\n Pattern classification;Performance\n evaluation;Training data;Text\n analysis;Data engineering;Convergence;\n writer identification system;majority\n class;minority class;imbalanced data\n sets;polynomial fitting functions;\n class-modular strategy},\n doi={10.1109/DAS.2008.74},\n ISSN={},\n month={Sept},}\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n topology='star',\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
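# --- Editorial sketch (not part of the original module) ---------------------
# SMMO above seeds oversampling only from minority points that a simple
# majority vote of the (in-sample fitted) ensemble misclassifies, and then
# interpolates each seed toward the mean of its minority nearest neighbours.
# A hypothetical restatement of the seed-selection step; like the code above,
# it assumes 0/1 encoded labels so that the vote threshold is meaningful:

def _misclassified_minority_indices(predictions, y, min_label):
    """Indices of minority samples misclassified by the ensemble vote."""
    votes = np.sum(np.vstack(predictions), axis=0)
    ensemble_pred = np.where(votes > len(predictions) / 2, 1, 0)
    return np.where((ensemble_pred != y) & (y == min_label))[0]
# -----------------------------------------------------------------------------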
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n topoplogy (str): 'star'/'bus'/'mesh'\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n if topology.startswith('poly'):\n self.check_greater_or_equal(\n int(topology.split('_')[-1]), 'topology', 1)\n else:\n self.check_isin(topology, \"topology\", ['star', 'bus', 'mesh'])\n\n self.proportion = proportion\n self.topology = topology\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'topology': ['star', 'bus', 'mesh',\n 'poly_1', 'poly_2', 'poly_3']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n samples = []\n if self.topology == 'star':\n # Implementation of the star topology\n X_mean = np.mean(X_min, axis=0)\n k = max([1, int(np.rint(n_to_sample/len(X_min)))])\n for x in X_min:\n diff = X_mean - x\n for i in range(1, k+1):\n samples.append(x + float(i)/(k+1)*diff)\n elif self.topology == 'bus':\n # Implementation of the bus topology\n k = max([1, int(np.rint(n_to_sample/len(X_min)))])\n for i in range(1, len(X_min)):\n diff = X_min[i-1] - X_min[i]\n for j in range(1, k+1):\n samples.append(X_min[i] + float(j)/(k+1)*diff)\n elif self.topology == 'mesh':\n # Implementation of the mesh topology\n if len(X_min)**2 > n_to_sample:\n while len(samples) < n_to_sample:\n random_i = self.random_state.randint(len(X_min))\n random_j = self.random_state.randint(len(X_min))\n diff = X_min[random_i] - X_min[random_j]\n samples.append(X_min[random_i] + 0.5*diff)\n else:\n n_combs = (len(X_min)*(len(X_min)-1)/2)\n k = max([1, int(np.rint(n_to_sample/n_combs))])\n for i in range(len(X_min)):\n for j in range(len(X_min)):\n diff = X_min[i] - X_min[j]\n for li in range(1, k+1):\n samples.append(X_min[j] + float(li)/(k+1)*diff)\n elif self.topology.startswith('poly'):\n # Implementation of the polynomial topology\n deg = int(self.topology.split('_')[1])\n dim = len(X_min[0])\n\n def fit_poly(d):\n return np.poly1d(np.polyfit(np.arange(len(X_min)),\n X_min[:, d], deg))\n\n polys = [fit_poly(d) for d in range(dim)]\n\n for d in range(dim):\n random_sample = self.random_state.random_sample()*len(X_min)\n samples_gen = [polys[d](random_sample)\n for _ in range(n_to_sample)]\n samples.append(np.array(samples_gen))\n samples = np.vstack(samples).T\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, 
np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'topology': self.topology,\n 'random_state': self._random_state_init}\n\n\nclass Stefanowski(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{stefanowski,\n author = {Stefanowski, Jerzy and Wilk, Szymon},\n title = {Selective Pre-processing of Imbalanced Data for\n Improving Classification Performance},\n booktitle = {Proceedings of the 10th International Conference\n on Data Warehousing and Knowledge Discovery},\n series = {DaWaK '08},\n year = {2008},\n isbn = {978-3-540-85835-5},\n location = {Turin, Italy},\n pages = {283--292},\n numpages = {10},\n url = {http://dx.doi.org/10.1007/978-3-540-85836-2_27},\n doi = {10.1007/978-3-540-85836-2_27},\n acmid = {1430591},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal,\n OverSampling.cat_sample_copy,\n OverSampling.cat_borderline]\n\n def __init__(self, strategy='weak_amp', n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (str): 'weak_amp'/'weak_amp_relabel'/'strong_amp'\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_isin(strategy,\n 'strategy',\n ['weak_amp', 'weak_amp_relabel', 'strong_amp'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n # this method does not maintain randomness, the parameter is\n # introduced for the compatibility of interfaces\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n if not raw:\n return [{'strategy': 'weak_amp'},\n {'strategy': 'weak_amp_relabel'},\n {'strategy': 'strong_amp'}, ]\n else:\n return {'strategy': ['weak_amp', 'weak_amp_relabel', 'strong_amp']}\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if self.class_stats[self.min_label] < 6:\n m = (\"The number of minority samples (%d) is not\"\n \" enough for sampling\")\n m = m % (self.class_stats[self.min_label])\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # copying y as its values will change\n y = y.copy()\n # fitting the nearest neighbors model for noise filtering, 4 neighbors\n # instead of 3 as the closest neighbor to a point is itself\n nn = NearestNeighbors(n_neighbors=min(4, len(X)), n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X)\n\n # fitting the nearest neighbors model for sample generation,\n # 6 neighbors instead of 5 for the same reason\n nn5 = NearestNeighbors(n_neighbors=min(6, len(X)), n_jobs=self.n_jobs)\n nn5.fit(X)\n distance5, indices5 = nn5.kneighbors(X)\n\n # determining noisy and safe flags\n flags = []\n for i in range(len(indices)):\n if mode(y[indices[i][1:]]) == y[i]:\n 
flags.append('safe')\n else:\n flags.append('noisy')\n flags = np.array(flags)\n\n D = (y == self.maj_label) & (flags == 'noisy')\n minority_indices = np.where(y == self.min_label)[0]\n\n samples = []\n if self.strategy == 'weak_amp' or self.strategy == 'weak_amp_relabel':\n # weak mplification - the number of copies is the number of\n # majority nearest neighbors\n for i in minority_indices:\n if flags[i] == 'noisy':\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n if self.strategy == 'weak_amp_relabel':\n # relabling - noisy majority neighbors are relabelled to minority\n for i in minority_indices:\n if flags[i] == 'noisy':\n for j in indices[i][1:]:\n if y[j] == self.maj_label and flags[j] == 'noisy':\n y[j] = self.min_label\n D[j] = False\n if self.strategy == 'strong_amp':\n # safe minority samples are copied as many times as many safe\n # majority samples are among the nearest neighbors\n for i in minority_indices:\n if flags[i] == 'safe':\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n # if classified correctly by knn(5), noisy minority samples are\n # amplified by creating as many copies as many save majority\n # samples in its neighborhood are present otherwise amplify\n # based on the 5 neighborhood\n for i in minority_indices:\n if flags[i] == 'noisy':\n if mode(y[indices5[i][1:]]) == y[i]:\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n else:\n k = np.sum(np.logical_and(\n y[indices5[i][1:]] == self.maj_label,\n flags[indices5[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n\n to_remove = np.where(D)[0]\n\n X_noise_removed = np.delete(X, to_remove, axis=0)\n y_noise_removed = np.delete(y, to_remove, axis=0)\n\n if len(samples) == 0 and len(X_noise_removed) > 10:\n m = \"no samples to add\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X_noise_removed, y_noise_removed\n elif len(samples) == 0:\n m = \"all samples removed as noise, returning the original dataset\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n return (np.vstack([X_noise_removed,\n np.vstack(samples)]),\n np.hstack([y_noise_removed,\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'strategy': self.strategy,\n 'n_jobs': self.n_jobs}\n\n\nclass ADOMS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{adoms,\n author={Tang, S. 
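# --- Editorial usage sketch (not part of the original module) ---------------
# Stefanowski's method above flags every sample as 'safe' or 'noisy' from the
# mode of its 3 nearest neighbours' labels, removes noisy majority samples,
# and copies minority samples in proportion to the number of safe majority
# neighbours; 'weak_amp_relabel' additionally relabels noisy majority
# neighbours of noisy minority points.  Hypothetical usage of the three
# strategies:

def _example_stefanowski_usage():
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=200, weights=[0.9, 0.1],
                               random_state=0)
    for strategy in ('weak_amp', 'weak_amp_relabel', 'strong_amp'):
        X_res, y_res = Stefanowski(strategy=strategy).sample(X, y)
        print(strategy, X_res.shape)
# -----------------------------------------------------------------------------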
and Chen, S.},\n booktitle={2008 International Conference on\n Information Technology and\n Applications in Biomedicine},\n title={The generation mechanism of synthetic\n minority class examples},\n year={2008},\n volume={},\n number={},\n pages={444-447},\n keywords={medical image processing;\n generation mechanism;synthetic\n minority class examples;class\n imbalance problem;medical image\n analysis;oversampling algorithm;\n Principal component analysis;\n Biomedical imaging;Medical\n diagnostic imaging;Information\n technology;Biomedical engineering;\n Noise generators;Concrete;Nearest\n neighbor searches;Data analysis;\n Image analysis},\n doi={10.1109/ITAB.2008.4570642},\n ISSN={2168-2194},\n month={May}}\n \"\"\"\n\n categories = [OverSampling.cat_dim_reduction,\n OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): parameter of the nearest neighbor component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0.0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distance, indices = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n index = self.random_state.randint(len(X_min))\n neighbors = X_min[indices[index]]\n\n # fitting the PCA\n pca = PCA(n_components=1)\n pca.fit(neighbors)\n\n # extracting the principal direction\n principal_direction = pca.components_[0]\n\n # do the sampling according to the description in the paper\n random_index = self.random_state.randint(1, len(neighbors))\n random_neighbor = 
neighbors[random_index]\n d = np.linalg.norm(random_neighbor - X_min[index])\n r = self.random_state.random_sample()\n inner_product = np.dot(random_neighbor - X_min[index],\n principal_direction)\n sign = 1.0 if inner_product > 0.0 else -1.0\n samples.append(X_min[index] + sign*r*d*principal_direction)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Safe_Level_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{safe_level_smote,\n author = {\n Bunkhumpornpat, Chumphol and Sinapiromsaran,\n Krung and Lursinsap, Chidchanok},\n title = {Safe-Level-SMOTE: Safe-Level-Synthetic\n Minority Over-Sampling TEchnique for\n Handling the Class Imbalanced Problem},\n booktitle = {Proceedings of the 13th Pacific-Asia\n Conference on Advances in Knowledge\n Discovery and Data Mining},\n series = {PAKDD '09},\n year = {2009},\n isbn = {978-3-642-01306-5},\n location = {Bangkok, Thailand},\n pages = {475--482},\n numpages = {8},\n url = {http://dx.doi.org/10.1007/978-3-642-01307-2_43},\n doi = {10.1007/978-3-642-01307-2_43},\n acmid = {1533904},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n keywords = {Class Imbalanced Problem, Over-sampling,\n SMOTE, Safe Level},\n }\n\n Notes:\n * The original method was not prepared for the case when no minority\n sample has minority neighbors.\n \"\"\"\n\n categories = [OverSampling.cat_borderline,\n OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
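# --- Editorial sketch (not part of the original module) ---------------------
# ADOMS above generates each synthetic point along the first principal
# direction of the seed's neighbourhood instead of along the segment to a
# neighbour: the chosen neighbour only supplies the step length and the sign
# of the direction.  A hedged restatement of that per-sample step; `PCA` and
# `np` come from the module's imports, `rng` is a numpy RandomState, and
# `neighbours` contains the seed itself in row 0, exactly as indices[index]
# does above:

def _adoms_step(x_seed, neighbours, rng):
    pca = PCA(n_components=1)
    pca.fit(neighbours)
    direction = pca.components_[0]               # first principal direction

    neighbour = neighbours[rng.randint(1, len(neighbours))]
    dist = np.linalg.norm(neighbour - x_seed)
    sign = 1.0 if np.dot(neighbour - x_seed, direction) > 0.0 else -1.0
    return x_seed + sign * rng.random_sample() * dist * direction
# -----------------------------------------------------------------------------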
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model\n n_neighbors = min([self.n_neighbors+1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X)\n\n minority_labels = (y == self.min_label)\n minority_indices = np.where(minority_labels)[0]\n\n # do the sampling\n numattrs = len(X[0])\n samples = []\n for _ in range(n_to_sample):\n index = self.random_state.randint(len(minority_indices))\n neighbor_index = self.random_state.choice(indices[index][1:])\n\n p = X[index]\n n = X[neighbor_index]\n\n # find safe levels\n sl_p = np.sum(y[indices[index][1:]] == self.min_label)\n sl_n = np.sum(y[indices[neighbor_index][1:]]\n == self.min_label)\n\n if sl_n > 0:\n sl_ratio = float(sl_p)/sl_n\n else:\n sl_ratio = np.inf\n\n if sl_ratio == np.inf and sl_p == 0:\n pass\n else:\n s = np.zeros(numattrs)\n for atti in range(numattrs):\n # iterate through attributes and do sampling according to\n # safe level\n if sl_ratio == np.inf and sl_p > 0:\n gap = 0.0\n elif sl_ratio == 1:\n gap = self.random_state.random_sample()\n elif sl_ratio > 1:\n gap = self.random_state.random_sample()*1.0/sl_ratio\n elif sl_ratio < 1:\n gap = (1 - sl_ratio) + \\\n self.random_state.random_sample()*sl_ratio\n dif = n[atti] - p[atti]\n s[atti] = p[atti] + gap*dif\n samples.append(s)\n\n if len(samples) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"No samples generated\")\n return X.copy(), y.copy()\n else:\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': 
self._random_state_init}\n\n\nclass MSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{msmote,\n author = {Hu, Shengguo and Liang,\n Yanfeng and Ma, Lintao and He, Ying},\n title = {MSMOTE: Improving Classification\n Performance When Training Data\n is Imbalanced},\n booktitle = {Proceedings of the 2009 Second\n International Workshop on\n Computer Science and Engineering\n - Volume 02},\n series = {IWCSE '09},\n year = {2009},\n isbn = {978-0-7695-3881-5},\n pages = {13--17},\n numpages = {5},\n url = {https://doi.org/10.1109/WCSE.2009.756},\n doi = {10.1109/WCSE.2009.756},\n acmid = {1682710},\n publisher = {IEEE Computer Society},\n address = {Washington, DC, USA},\n keywords = {imbalanced data, over-sampling,\n SMOTE, AdaBoost, samples groups,\n SMOTEBoost},\n }\n\n Notes:\n * The original method was not prepared for the case when all\n minority samples are noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X_min)\n\n noise_mask = np.repeat(False, len(X_min))\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n index = self.random_state.randint(len(X_min))\n\n n_p = np.sum(y[indices[index][1:]] == self.min_label)\n\n if n_p == self.n_neighbors:\n sample_type = 'security'\n elif n_p == 0:\n 
sample_type = 'noise'\n noise_mask[index] = True\n if np.all(noise_mask):\n _logger.info(\"All minority samples are noise\")\n return X.copy(), y.copy()\n else:\n sample_type = 'border'\n\n if sample_type == 'security':\n neighbor_index = self.random_state.choice(indices[index][1:])\n elif sample_type == 'border':\n neighbor_index = indices[index][1]\n else:\n continue\n\n s_gen = self.sample_between_points_componentwise(X_min[index],\n X[neighbor_index])\n samples.append(s_gen)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DE_oversampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{de_oversampling,\n author={Chen, L. and Cai, Z. and Chen, L. and\n Gu, Q.},\n booktitle={2010 Third International Conference\n on Knowledge Discovery and Data Mining},\n title={A Novel Differential Evolution-Clustering\n Hybrid Resampling Algorithm on Imbalanced\n Datasets},\n year={2010},\n volume={},\n number={},\n pages={81-85},\n keywords={pattern clustering;sampling methods;\n support vector machines;differential\n evolution;clustering algorithm;hybrid\n resampling algorithm;imbalanced\n datasets;support vector machine;\n minority class;mutation operators;\n crossover operators;data cleaning\n method;F-measure criterion;ROC area\n criterion;Support vector machines;\n Intrusion detection;Support vector\n machine classification;Cleaning;\n Electronic mail;Clustering algorithms;\n Signal to noise ratio;Learning\n systems;Data mining;Geology;imbalanced\n datasets;hybrid resampling;clustering;\n differential evolution;support vector\n machine},\n doi={10.1109/WKDD.2010.48},\n ISSN={},\n month={Jan},}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n crossover_rate=0.5,\n similarity_threshold=0.5,\n n_clusters=30, n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
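# --- Editorial sketch (not part of the original module) ---------------------
# MSMOTE above types each minority seed by the number of minority points among
# its inspected nearest neighbours and varies the interpolation target
# accordingly; the gap itself is drawn componentwise, via
# sample_between_points_componentwise().  A restatement of the typing rule:

def _msmote_sample_type(n_minority_neighbours, k):
    """Return the MSMOTE group of a minority seed with k inspected neighbours."""
    if n_minority_neighbours == k:
        return 'security'    # safe region: interpolate toward a random neighbour
    if n_minority_neighbours == 0:
        return 'noise'       # skipped, no synthetic sample is generated here
    return 'border'          # borderline: interpolate toward the nearest neighbour
# -----------------------------------------------------------------------------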
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n crossover_rate (float): cross over rate of evoluation\n similarity_threshold (float): similarity threshold paramter\n n_clusters (int): number of clusters for cleansing\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 2)\n self.check_in_range(crossover_rate, 'crossover_rate', [0, 1])\n self.check_in_range(similarity_threshold,\n 'similarity_threshold', [0, 1])\n self.check_greater_or_equal(n_clusters, 'n_clusters', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.crossover_rate = crossover_rate\n self.similarity_threshold = similarity_threshold\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'crossover_rate': [0.1, 0.5, 0.9],\n 'similarity_threshold': [0.5, 0.9],\n 'n_clusters': [10, 20, 50]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = len(X[0])\n\n X_min = X[y == self.min_label]\n\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distance, indices = nn.kneighbors(X_min)\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n # mutation according to the description in the paper\n random_index = self.random_state.randint(len(X_min))\n random_point = X_min[random_index]\n random_neighbor_indices = self.random_state.choice(\n indices[random_index][1:], 2, replace=False)\n random_neighbor_1 = X_min[random_neighbor_indices[0]]\n random_neighbor_2 = X_min[random_neighbor_indices[1]]\n\n mutated = random_point + \\\n (random_neighbor_1 - random_neighbor_2) * \\\n self.random_state.random_sample()\n\n # crossover - updates the vector 'mutated'\n rand_s = self.random_state.randint(d)\n for i in range(d):\n random_value = self.random_state.random_sample()\n if random_value >= self.crossover_rate and not i == rand_s:\n mutated[i] = random_point[i]\n elif random_value < self.crossover_rate or i == rand_s:\n pass\n\n samples.append(mutated)\n\n # 
assembling all data for clearning\n X, y = np.vstack([X, np.vstack(samples)]), np.hstack(\n [y, np.repeat(self.min_label, len(samples))])\n X_min = X[y == self.min_label]\n\n # cleansing based on clustering\n n_clusters = min([len(X), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n unique_labels = np.unique(kmeans.labels_)\n\n def cluster_filter(li):\n return len(np.unique(y[np.where(kmeans.labels_ == li)[0]])) == 1\n\n one_label_clusters = [li for li in unique_labels if cluster_filter(li)]\n to_remove = []\n\n # going through the clusters having one label only\n for li in one_label_clusters:\n cluster_indices = np.where(kmeans.labels_ == li)[0]\n mean_of_cluster = kmeans.cluster_centers_[li]\n\n # finding center-like sample\n center_like_index = None\n center_like_dist = np.inf\n\n for i in cluster_indices:\n dist = np.linalg.norm(X[i] - mean_of_cluster)\n if dist < center_like_dist:\n center_like_dist = dist\n center_like_index = i\n\n # removing the samples similar to the center-like sample\n for i in cluster_indices:\n if i != center_like_index:\n d = np.inner(X[i], X[center_like_index]) / \\\n (np.linalg.norm(X[i]) *\n np.linalg.norm(X[center_like_index]))\n if d > self.similarity_threshold:\n to_remove.append(i)\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'crossover_rate': self.crossover_rate,\n 'similarity_threshold': self.similarity_threshold,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n# Borrowed from sklearn-dev, will be removed once the sklearn implementation\n# becomes stable\n\n\nclass OPTICS:\n def __init__(self, min_samples=5, max_eps=np.inf, metric='euclidean',\n p=2, metric_params=None, maxima_ratio=.75,\n rejection_ratio=.7, similarity_threshold=0.4,\n significant_min=.003, min_cluster_size=.005,\n min_maxima_ratio=0.001, algorithm='ball_tree',\n leaf_size=30, n_jobs=1):\n\n self.max_eps = max_eps\n self.min_samples = min_samples\n self.maxima_ratio = maxima_ratio\n self.rejection_ratio = rejection_ratio\n self.similarity_threshold = similarity_threshold\n self.significant_min = significant_min\n self.min_cluster_size = min_cluster_size\n self.min_maxima_ratio = min_maxima_ratio\n self.algorithm = algorithm\n self.metric = metric\n self.metric_params = metric_params\n self.p = p\n self.leaf_size = leaf_size\n self.n_jobs = n_jobs\n\n def fit(self, X, y=None):\n \"\"\"Perform OPTICS clustering\n Extracts an ordered list of points and reachability distances, and\n performs initial clustering using `max_eps` distance specified at\n OPTICS object instantiation.\n Parameters\n ----------\n X : array, shape (n_samples, n_features)\n The data.\n y : ignored\n Returns\n -------\n self : instance of OPTICS\n The instance.\n \"\"\"\n n_samples = len(X)\n\n if self.min_samples > n_samples:\n m = (\"Number of training samples (n_samples=%d) must \"\n \"be greater than min_samples (min_samples=%d) \"\n \"used for clustering.\")\n m = m % (n_samples, self.min_samples)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n if self.min_cluster_size <= 0 or (self.min_cluster_size !=\n int(self.min_cluster_size)\n and self.min_cluster_size > 1):\n m = ('min_cluster_size must be a positive integer or '\n 'a float between 0 and 1. 
Got %r')\n m = m % self.min_cluster_size\n raise ValueError(self.__class__.__name__ + \": \" + m)\n elif self.min_cluster_size > n_samples:\n m = ('min_cluster_size must be no greater than the '\n 'number of samples (%d). Got %d')\n m = m % (n_samples, self.min_cluster_size)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n # Start all points as 'unprocessed' ##\n self.reachability_ = np.empty(n_samples)\n self.reachability_.fill(np.inf)\n self.core_distances_ = np.empty(n_samples)\n self.core_distances_.fill(np.nan)\n # Start all points as noise ##\n self.labels_ = np.full(n_samples, -1, dtype=int)\n\n nbrs = NearestNeighbors(n_neighbors=self.min_samples,\n algorithm=self.algorithm,\n leaf_size=self.leaf_size, metric=self.metric,\n metric_params=self.metric_params, p=self.p,\n n_jobs=self.n_jobs)\n\n nbrs.fit(X)\n self.core_distances_[:] = nbrs.kneighbors(X,\n self.min_samples)[0][:, -1]\n\n self.ordering_ = self._calculate_optics_order(X, nbrs)\n\n return self\n\n # OPTICS helper functions\n\n def _calculate_optics_order(self, X, nbrs):\n # Main OPTICS loop. Not parallelizable. The order that entries are\n # written to the 'ordering_' list is important!\n processed = np.zeros(X.shape[0], dtype=bool)\n ordering = np.zeros(X.shape[0], dtype=int)\n ordering_idx = 0\n for point in range(X.shape[0]):\n if processed[point]:\n continue\n if self.core_distances_[point] <= self.max_eps:\n while not processed[point]:\n processed[point] = True\n ordering[ordering_idx] = point\n ordering_idx += 1\n point = self._set_reach_dist(point, processed, X, nbrs)\n else: # For very noisy points\n ordering[ordering_idx] = point\n ordering_idx += 1\n processed[point] = True\n return ordering\n\n def _set_reach_dist(self, point_index, processed, X, nbrs):\n P = X[point_index:point_index + 1]\n indices = nbrs.radius_neighbors(P, radius=self.max_eps,\n return_distance=False)[0]\n\n # Getting indices of neighbors that have not been processed\n unproc = np.compress((~np.take(processed, indices)).ravel(),\n indices, axis=0)\n # Keep n_jobs = 1 in the following lines...please\n if not unproc.size:\n # Everything is already processed. Return to main loop\n return point_index\n\n dists = pairwise_distances(P, np.take(X, unproc, axis=0),\n self.metric, n_jobs=1).ravel()\n\n rdists = np.maximum(dists, self.core_distances_[point_index])\n new_reach = np.minimum(np.take(self.reachability_, unproc), rdists)\n self.reachability_[unproc] = new_reach\n\n # Define return order based on reachability distance\n return (unproc[self.quick_scan(np.take(self.reachability_, unproc),\n dists)])\n\n def isclose(self, a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max([rel_tol*max([abs(a), abs(b)]), abs_tol])\n\n def quick_scan(self, rdists, dists):\n rdist = np.inf\n dist = np.inf\n n = len(rdists)\n for i in range(n):\n if rdists[i] < rdist:\n rdist = rdists[i]\n dist = dists[i]\n idx = i\n elif self.isclose(rdists[i], rdist):\n if dists[i] < dist:\n dist = dists[i]\n idx = i\n return idx\n\n\nclass SMOBD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{smobd,\n author={Cao, Q. 
and Wang, S.},\n booktitle={2011 International Conference on\n Information Management, Innovation\n Management and Industrial\n Engineering},\n title={Applying Over-sampling Technique Based\n on Data Density and Cost-sensitive\n SVM to Imbalanced Learning},\n year={2011},\n volume={2},\n number={},\n pages={543-548},\n keywords={data handling;learning (artificial\n intelligence);support vector machines;\n oversampling technique application;\n data density;cost sensitive SVM;\n imbalanced learning;SMOTE algorithm;\n data distribution;density information;\n Support vector machines;Classification\n algorithms;Noise measurement;Arrays;\n Noise;Algorithm design and analysis;\n Training;imbalanced learning;\n cost-sensitive SVM;SMOTE;data density;\n SMOBD},\n doi={10.1109/ICIII.2011.276},\n ISSN={2155-1456},\n month={Nov},}\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_extensive,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n eta1=0.5,\n t=1.8,\n min_samples=5,\n max_eps=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n eta1 (float): control parameter of density estimation\n t (float): control parameter of noise filtering\n min_samples (int): minimum samples parameter for OPTICS\n max_eps (float): maximum environment radius paramter for OPTICS\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_in_range(eta1, 'eta1', [0.0, 1.0])\n self.check_greater_or_equal(t, 't', 0)\n self.check_greater_or_equal(min_samples, 'min_samples', 1)\n self.check_greater_or_equal(max_eps, 'max_eps', 0.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.eta1 = eta1\n self.t = t\n self.min_samples = min_samples\n self.max_eps = max_eps\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'eta1': [0.1, 0.5, 0.9],\n 't': [1.5, 2.5],\n 'min_samples': [5],\n 'max_eps': [0.1, 0.5, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # running 
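OPTICS on the minority samples first: the resulting core distances and\n        # reachability values feed both the noise filter (points whose core distance\n        # and reachability both exceed t times their respective averages) and the\n        # density weights df = eta1 * normalised core distance +\n        # (1 - eta1) * normalised neighbour count used to draw base points further\n        # below; the code relies on 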
the OPTICS technique based on the sklearn implementation\n # TODO: replace to sklearn call once it is stable\n min_samples = min([len(X_min)-1, self.min_samples])\n o = OPTICS(min_samples=min_samples,\n max_eps=self.max_eps,\n n_jobs=self.n_jobs)\n o.fit(X_min)\n cd = o.core_distances_\n rd = o.reachability_\n\n # noise filtering\n cd_average = np.mean(cd)\n rd_average = np.mean(rd)\n noise = np.logical_and(cd > cd_average*self.t, rd > rd_average*self.t)\n\n # fitting a nearest neighbor model to be able to find\n # neighbors in radius\n n_neighbors = min([len(X_min), self.min_samples+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X_min)\n\n # determining the density\n factor_1 = cd\n factor_2 = np.array([len(x) for x in nn.radius_neighbors(\n X_min, radius=self.max_eps, return_distance=False)])\n\n if max(factor_1) == 0 or max(factor_2) == 0:\n return X.copy(), y.copy()\n\n factor_1 = factor_1/max(factor_1)\n factor_2 = factor_2/max(factor_2)\n\n df = factor_1*self.eta1 + factor_2*(1 - self.eta1)\n\n # setting the density at noisy samples to zero\n for i in range(len(noise)):\n if noise[i]:\n df[i] = 0\n\n if sum(df) == 0 or any(np.isnan(df)) or any(np.isinf(df)):\n return X.copy(), y.copy()\n\n # normalizing the density\n df_dens = df/sum(df)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)), p=df_dens)\n neighbor_idx = self.random_state.choice(indices[idx][1:])\n samples.append(self.sample_between_points_componentwise(\n X_min[idx], X_min[neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'eta1': self.eta1,\n 't': self.t,\n 'min_samples': self.min_samples,\n 'max_eps': self.max_eps,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SUNDO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{sundo,\n author={Cateni, S. and Colla, V. 
and Vannucci, M.},\n booktitle={2011 11th International Conference on\n Intelligent Systems Design and\n Applications},\n title={Novel resampling method for the\n classification of imbalanced datasets for\n industrial and other real-world problems},\n year={2011},\n volume={},\n number={},\n pages={402-407},\n keywords={decision trees;pattern classification;\n sampling methods;support vector\n machines;resampling method;imbalanced\n dataset classification;industrial\n problem;real world problem;\n oversampling technique;undersampling\n technique;support vector machine;\n decision tree;binary classification;\n synthetic dataset;public dataset;\n industrial dataset;Support vector\n machines;Training;Accuracy;Databases;\n Intelligent systems;Breast cancer;\n Decision trees;oversampling;\n undersampling;imbalanced dataset},\n doi={10.1109/ISDA.2011.6121689},\n ISSN={2164-7151},\n month={Nov}}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_application]\n\n def __init__(self, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return [{}]\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_1 = len(X_min)\n n_0 = len(X) - n_1\n N = int(np.rint(0.5*n_0 - 0.5*n_1 + 0.5))\n\n if N == 0:\n return X.copy(), y.copy()\n\n # generating minority samples\n samples = []\n\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_maj)\n\n stds = np.std(X_min, axis=0)\n # At one point the algorithm says to keep those points which are\n # the most distant from majority samples, and not leaving any minority\n # sample isolated. 
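The 1-NN model fitted on the majority class above and the per-feature\n        # standard deviations computed from the minority class serve exactly this\n        # purpose. 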
This can be implemented by generating multiple\n # samples for each point and keep the one most distant from the\n # majority samples.\n for _ in range(N):\n i = self.random_state.randint(len(X_min))\n best_sample = None\n best_sample_dist = 0\n for _ in range(3):\n s = self.random_state.normal(X_min[i], stds)\n dist, ind = nn.kneighbors(s.reshape(1, -1))\n if dist[0][0] > best_sample_dist:\n best_sample_dist = dist[0][0]\n best_sample = s\n samples.append(best_sample)\n\n # Extending the minority dataset with the new samples\n X_min_extended = np.vstack([X_min, np.vstack(samples)])\n\n # Removing N elements from the majority dataset\n\n # normalize\n mms = MinMaxScaler()\n X_maj_normalized = mms.fit_transform(X_maj)\n\n # computing the distance matrix\n dm = pairwise_distances(X_maj_normalized, X_maj_normalized)\n\n # len(X_maj) offsets for the diagonal 0 elements, 2N because\n # every distances appears twice\n threshold = sorted(dm.flatten())[min(\n [len(X_maj) + 2*N, len(dm)*len(dm) - 1])]\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # extracting the coordinates of pairs closer than threshold\n pairs_to_break = np.where(dm < threshold)\n pairs_to_break = np.vstack(pairs_to_break)\n\n # sorting the pairs, otherwise both points would be removed\n pairs_to_break.sort(axis=0)\n\n # uniqueing the coordinates - the final number might be less than N\n to_remove = np.unique(pairs_to_break[0])\n\n # removing the selected elements\n X_maj_cleaned = np.delete(X_maj, to_remove, axis=0)\n\n return (np.vstack([X_min_extended, X_maj_cleaned]),\n np.hstack([np.repeat(self.min_label, len(X_min_extended)),\n np.repeat(self.maj_label, len(X_maj_cleaned))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MSYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{msyn,\n author=\"Fan, Xiannian\n and Tang, Ke\n and Weise, Thomas\",\n editor=\"Huang, Joshua Zhexue\n and Cao, Longbing\n and Srivastava, Jaideep\",\n title=\"Margin-Based Over-Sampling Method for\n Learning from Imbalanced Datasets\",\n booktitle=\"Advances in Knowledge Discovery and\n Data Mining\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"309--320\",\n abstract=\"Learning from imbalanced datasets has\n drawn more and more attentions from\n both theoretical and practical aspects.\n Over- sampling is a popular and simple\n method for imbalanced learning. In this\n paper, we show that there is an\n inherently potential risk associated\n with the over-sampling algorithms in\n terms of the large margin principle.\n Then we propose a new synthetic over\n sampling method, named Margin-guided\n Synthetic Over-sampling (MSYN), to\n reduce this risk. The MSYN improves\n learning with respect to the data\n distributions guided by the\n margin-based rule. Empirical study\n verities the efficacy of MSYN.\",\n isbn=\"978-3-642-20847-8\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n pressure=1.5,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n pressure (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(pressure, 'pressure', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.pressure = pressure\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'pressure': [2.5, 2.0, 1.5],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n min_indices = np.where(y == self.min_label)[0]\n maj_indices = np.where(y == self.maj_label)[0]\n\n # generating samples\n smote = SMOTE(proportion=self.pressure,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n X_res, y_res = smote.sample(X, y)\n X_new, _ = X_res[len(X):], y_res[len(X):]\n\n if len(X_new) == 0:\n m = \"Sampling is not needed\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # Compute nearest hit and miss for both classes\n nn = NearestNeighbors(n_neighbors=len(X), n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n # computing nearest hit and miss distances, these will be used to\n # compute thetas\n nearest_hit_dist = np.array([dist[i][next(j for j in range(\n 1, len(X)) if y[i] == y[ind[i][j]])] for i in range(len(X))])\n nearest_miss_dist = np.array([dist[i][next(j for j in range(\n 1, len(X)) if y[i] != y[ind[i][j]])] for i in range(len(X))])\n\n # computing the thetas without new samples being involved\n theta_A_sub_alpha = 0.5*(nearest_miss_dist - nearest_hit_dist)\n theta_min = theta_A_sub_alpha[min_indices]\n theta_maj = theta_A_sub_alpha[maj_indices]\n\n # computing the f_3 score for all new samples\n f_3 = []\n for x in X_new:\n # determining the distances of the new sample from the training set\n distances = np.linalg.norm(X - x, axis=1)\n\n # computing nearest hit and miss distances involving the new\n # elements\n mask = nearest_hit_dist[min_indices] < distances[min_indices]\n nearest_hit_dist_min = np.where(mask,\n nearest_hit_dist[min_indices],\n distances[min_indices])\n nearest_miss_dist_min = nearest_miss_dist[min_indices]\n nearest_hit_dist_maj = nearest_hit_dist[maj_indices]\n mask = nearest_miss_dist[maj_indices] < distances[maj_indices]\n nearest_miss_dist_maj = np.where(mask,\n nearest_miss_dist[maj_indices],\n distances[maj_indices])\n\n # computing the thetas incorporating the new elements\n theta_x_min = 0.5*(nearest_miss_dist_min - 
nearest_hit_dist_min)\n theta_x_maj = 0.5*(nearest_miss_dist_maj - nearest_hit_dist_maj)\n\n # determining the delta scores and computing f_3\n Delta_P = np.sum(theta_x_min - theta_min)\n Delta_N = np.sum(theta_x_maj - theta_maj)\n\n f_3.append(-Delta_N/(Delta_P + 0.01))\n\n f_3 = np.array(f_3)\n\n # determining the elements with the minimum f_3 scores to add\n _, new_ind = zip(\n *sorted(zip(f_3, np.arange(len(f_3))), key=lambda x: x[0]))\n new_ind = list(new_ind[:(len(X_maj) - len(X_min))])\n\n return (np.vstack([X, X_new[new_ind]]),\n np.hstack([y, np.repeat(self.min_label, len(new_ind))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'pressure': self.pressure,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SVM_balance(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{svm_balance,\n author = {Farquad, M.A.H. and Bose, Indranil},\n title = {Preprocessing Unbalanced Data Using Support\n Vector Machine},\n journal = {Decis. Support Syst.},\n issue_date = {April, 2012},\n volume = {53},\n number = {1},\n month = apr,\n year = {2012},\n issn = {0167-9236},\n pages = {226--233},\n numpages = {8},\n url = {http://dx.doi.org/10.1016/j.dss.2012.01.016},\n doi = {10.1016/j.dss.2012.01.016},\n acmid = {2181554},\n publisher = {Elsevier Science Publishers B. V.},\n address = {Amsterdam, The Netherlands, The Netherlands},\n keywords = {COIL data, Hybrid method, Preprocessor, SVM,\n Unbalanced data},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X, y = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n if sum(y == self.min_label) < 2:\n return X.copy(), y.copy()\n else:\n cv = min([5, sum(y == self.min_label)])\n\n ss = StandardScaler()\n X_norm = ss.fit_transform(X)\n\n C_params = [0.01, 0.1, 1.0, 10.0]\n best_score = 0\n best_C = 0.01\n for C in C_params:\n _logger.info(self.__class__.__name__ + \": \" +\n \"Evaluating SVM with C=%f\" % C)\n svc = SVC(C=C, kernel='rbf', gamma='auto')\n score = np.mean(cross_val_score(svc, X_norm, y, cv=cv))\n if score > best_score:\n best_score = score\n best_C = C\n svc = SVC(C=best_C, kernel='rbf', gamma='auto')\n svc.fit(X_norm, y)\n\n return X, svc.predict(X_norm)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass TRIM_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{trim_smote,\n author=\"Puntumapon, Kamthorn\n and Waiyamai, Kitsana\",\n editor=\"Tan, Pang-Ning\n and Chawla, Sanjay\n and Ho, Chin Kuan\n and Bailey, James\",\n title=\"A Pruning-Based Approach for Searching\n Precise and Generalized Region for\n Synthetic Minority Over-Sampling\",\n booktitle=\"Advances in Knowledge Discovery\n and Data Mining\",\n year=\"2012\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"371--382\",\n isbn=\"978-3-642-30220-6\"\n }\n\n Notes:\n * It is not described precisely how the filtered data is used for\n sample generation. 
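In this implementation the minority points of all groups\n          surviving the precision filter are pooled and SMOTE-style interpolation\n          is carried out between a randomly chosen pooled point and one of its\n          nearest minority neighbours. 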
The method is proposed to be a preprocessing\n step, and it states that it applies sample generation to each\n group extracted.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n min_precision=0.3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_in_range(min_precision, 'min_precision', [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.min_precision = min_precision\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'min_precision': [0.3]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def trim(self, y):\n \"\"\"\n Determines the trim value.\n\n Args:\n y (np.array): array of target labels\n\n Returns:\n float: the trim value\n \"\"\"\n return np.sum(y == self.min_label)**2/len(y)\n\n def precision(self, y):\n \"\"\"\n Determines the precision value.\n\n Args:\n y (np.array): array of target labels\n\n Returns:\n float: the precision value\n \"\"\"\n return np.sum(y == self.min_label)/len(y)\n\n def determine_splitting_point(self, X, y, split_on_border=False):\n \"\"\"\n Determines the splitting point.\n\n Args:\n X (np.matrix): a subset of the training data\n y (np.array): an array of target labels\n split_on_border (bool): wether splitting on class borders is\n considered\n\n Returns:\n tuple(int, float), bool: (splitting feature, splitting value),\n make the split\n \"\"\"\n trim_value = self.trim(y)\n d = len(X[0])\n max_t_minus_gain = 0.0\n split = None\n\n # checking all dimensions of X\n for i in range(d):\n # sort the elements in dimension i\n sorted_X_y = sorted(zip(X[:, i], y), key=lambda pair: pair[0])\n sorted_y = [yy for _, yy in sorted_X_y]\n\n # number of minority samples on the left\n left_min = 0\n # number of minority samples on the right\n right_min = np.sum(sorted_y == self.min_label)\n\n # check all possible splitting points sequentiall\n for j in range(0, len(sorted_y)-1):\n if sorted_y[j] == self.min_label:\n # adjusting the number of minority and majority samples\n left_min = left_min + 1\n right_min = right_min - 1\n # checking of we can split on the border and do not split\n # tieing feature values\n if ((split_on_border is False\n or (split_on_border is True\n and not sorted_y[j-1] == sorted_y[j]))\n and sorted_X_y[j][0] != sorted_X_y[j+1][0]):\n # compute trim value of the left\n trim_left = left_min**2/(j+1)\n # compute trim value of the right\n trim_right = right_min**2/(len(sorted_y) - j - 1)\n # let's check the gain\n if max([trim_left, trim_right]) > max_t_minus_gain:\n max_t_minus_gain = max([trim_left, trim_right])\n split 
= (i, sorted_X_y[j][0])\n # return splitting values and the value of the logical condition\n # in line 9\n if split is not None:\n return split, max_t_minus_gain > trim_value\n else:\n return (0, 0), False\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n leafs = [(X, y)]\n candidates = []\n seeds = []\n\n # executing the trimming\n # loop in line 2 of the paper\n _logger.info(self.__class__.__name__ +\n \": \" + \"do the trimming process\")\n while len(leafs) > 0 or len(candidates) > 0:\n add_to_leafs = []\n # executing the loop starting in line 3\n for leaf in leafs:\n # the function implements the loop starting in line 6\n # splitting on class border is forced\n split, gain = self.determine_splitting_point(\n leaf[0], leaf[1], True)\n if len(leaf[0]) == 1:\n # small leafs with 1 element (no splitting point)\n # are dropped as noise\n continue\n else:\n # condition in line 9\n if gain:\n # making the split\n mask_left = (leaf[0][:, split[0]] <= split[1])\n X_left = leaf[0][mask_left]\n y_left = leaf[1][mask_left]\n mask_right = np.logical_not(mask_left)\n X_right = leaf[0][mask_right]\n y_right = leaf[1][mask_right]\n\n # condition in line 11\n if np.sum(y_left == self.min_label) > 0:\n add_to_leafs.append((X_left, y_left))\n # condition in line 13\n if np.sum(y_right == self.min_label) > 0:\n add_to_leafs.append((X_right, y_right))\n else:\n # line 16\n candidates.append(leaf)\n # we implement line 15 and 18 by replacing the list of leafs by\n # the list of new leafs.\n leafs = add_to_leafs\n\n # iterating through all candidates (loop starting in line 21)\n for c in candidates:\n # extracting splitting points, this time split on border\n # is not forced\n split, gain = self.determine_splitting_point(c[0], c[1], False)\n if len(c[0]) == 1:\n # small leafs are dropped as noise\n continue\n else:\n # checking condition in line 27\n if gain:\n # doing the split\n mask_left = (c[0][:, split[0]] <= split[1])\n X_left, y_left = c[0][mask_left], c[1][mask_left]\n mask_right = np.logical_not(mask_left)\n X_right, y_right = c[0][mask_right], c[1][mask_right]\n # checking logic in line 29\n if np.sum(y_left == self.min_label) > 0:\n leafs.append((X_left, y_left))\n # checking logic in line 31\n if np.sum(y_right == self.min_label) > 0:\n leafs.append((X_right, y_right))\n else:\n # adding candidate to seeds (line 35)\n seeds.append(c)\n # line 33 and line 36 are implemented by emptying the candidates\n # list\n candidates = []\n\n # filtering the resulting set\n filtered_seeds = [s for s in seeds if self.precision(\n s[1]) > self.min_precision]\n\n # handling the situation when no seeds were found\n if len(seeds) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"no seeds identified\")\n return X.copy(), y.copy()\n\n # fix for bad choice of min_precision\n multiplier = 0.9\n while len(filtered_seeds) == 0:\n filtered_seeds = [s for s in seeds if self.precision(\n s[1]) > 
self.min_precision*multiplier]\n multiplier = multiplier*0.9\n if multiplier < 0.1:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"no clusters passing the filtering\")\n return X.copy(), y.copy()\n\n seeds = filtered_seeds\n\n X_seed = np.vstack([s[0] for s in seeds])\n y_seed = np.hstack([s[1] for s in seeds])\n\n _logger.info(self.__class__.__name__ + \": \" + \"do the sampling\")\n # generating samples by SMOTE\n X_seed_min = X_seed[y_seed == self.min_label]\n if len(X_seed_min) <= 1:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"X_seed_min contains less than 2 samples\")\n return X.copy(), y.copy()\n\n n_neighbors = min([len(X_seed_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_seed_min)\n distances, indices = nn.kneighbors(X_seed_min)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.randint(len(X_seed_min))\n random_neighbor_idx = self.random_state.choice(\n indices[random_idx][1:])\n samples.append(self.sample_between_points(\n X_seed_min[random_idx], X_seed_min[random_neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'min_precision': self.min_precision,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_RSB(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{smote_rsb,\n author=\"Ramentol, Enislay\n and Caballero, Yail{\\'e}\n and Bello, Rafael\n and Herrera, Francisco\",\n title=\"SMOTE-RSB*: a hybrid preprocessing approach\n based on oversampling and undersampling for\n high imbalanced data-sets using SMOTE and\n rough sets theory\",\n journal=\"Knowledge and Information Systems\",\n year=\"2012\",\n month=\"Nov\",\n day=\"01\",\n volume=\"33\",\n number=\"2\",\n pages=\"245--265\",\n issn=\"0219-3116\",\n doi=\"10.1007/s10115-011-0465-6\",\n url=\"https://doi.org/10.1007/s10115-011-0465-6\"\n }\n\n Notes:\n * I think the description of the algorithm in Fig 5 of the paper\n is not correct. The set \"resultSet\" is initialized with the\n original instances, and then the While loop in the Algorithm\n run until resultSet is empty, which never holds. Also, the\n resultSet is only extended in the loop. Our implementation\n is changed in the following way: we generate twice as many\n instances are required to balance the dataset, and repeat\n the loop until the number of new samples added to the training\n set is enough to balance the dataset.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=2.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_maj = X[y == self.maj_label]\n X_min = X[y == self.min_label]\n\n # Step 1: do the sampling\n smote = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n X_samp, y_samp = smote.sample(X, y)\n X_samp, y_samp = X_samp[len(X):], y_samp[len(X):]\n\n if len(X_samp) == 0:\n return X.copy(), y.copy()\n\n # Step 2: (original will be added later)\n result_set = []\n\n # Step 3: first the data is normalized\n maximums = np.max(X_samp, axis=0)\n minimums = np.min(X_samp, axis=0)\n\n # normalize X_new and X_maj\n norm_factor = maximums - minimums\n null_mask = norm_factor == 0\n n_null = np.sum(null_mask)\n fixed = np.max(np.vstack([maximums[null_mask], np.repeat(1, n_null)]),\n axis=0)\n\n norm_factor[null_mask] = fixed\n\n X_samp_norm = X_samp / norm_factor\n X_maj_norm = X_maj / norm_factor\n\n # compute similarity matrix\n similarity_matrix = 1.0 - pairwise_distances(X_samp_norm,\n X_maj_norm,\n metric='minkowski',\n p=1)/len(X[0])\n\n # Step 4: counting the similar examples\n similarity_value = 0.4\n syn = len(X_samp)\n cont = np.zeros(syn)\n\n already_added = np.repeat(False, len(X_samp))\n\n while (len(result_set) < len(X_maj) - len(X_min)\n and similarity_value <= 0.9):\n for i in range(syn):\n cont[i] = np.sum(similarity_matrix[i, :] > similarity_value)\n if cont[i] == 0 and not already_added[i]:\n result_set.append(X_samp[i])\n already_added[i] = True\n similarity_value = similarity_value + 0.05\n\n # Step 5: returning the results depending the number of instances\n # added to the result set\n if len(result_set) > 0:\n return (np.vstack([X, np.vstack(result_set)]),\n np.hstack([y, np.repeat(self.min_label,\n len(result_set))]))\n else:\n return np.vstack([X, X_samp]), np.hstack([y, y_samp])\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': 
self._random_state_init}\n\n\nclass ProWSyn(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{prowsyn,\n author=\"Barua, Sukarna\n and Islam, Md. Monirul\n and Murase, Kazuyuki\",\n editor=\"Pei, Jian\n and Tseng, Vincent S.\n and Cao, Longbing\n and Motoda, Hiroshi\n and Xu, Guandong\",\n title=\"ProWSyn: Proximity Weighted Synthetic\n Oversampling Technique for\n Imbalanced Data Set Learning\",\n booktitle=\"Advances in Knowledge Discovery\n and Data Mining\",\n year=\"2013\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"317--328\",\n isbn=\"978-3-642-37456-2\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n L=5,\n theta=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n L (int): number of levels\n theta (float): smoothing factor in weight formula\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(L, \"L\", 1)\n self.check_greater_or_equal(theta, \"theta\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.L = L\n self.theta = theta\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'L': [3, 5, 7],\n 'theta': [0.1, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and\n target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # Step 1 - a bit generalized\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n m = \"Sampling is not needed\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # Step 2\n P = np.where(y == self.min_label)[0]\n X_maj = X[y == self.maj_label]\n\n Ps = []\n proximity_levels = []\n\n # Step 3\n for i in range(self.L):\n if len(P) == 0:\n break\n # Step 3 a\n n_neighbors = min([len(P), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X[P])\n distances, indices = nn.kneighbors(X_maj)\n\n # Step 3 b\n P_i = np.unique(np.hstack([i for i in indices]))\n\n # Step 3 c - proximity levels are encoded in the Ps list index\n Ps.append(P[P_i])\n proximity_levels.append(i+1)\n\n # Step 3 d\n P = np.delete(P, 
P_i)\n\n # Step 4\n if len(P) > 0:\n Ps.append(P)\n\n # Step 5\n if len(P) > 0:\n proximity_levels.append(i)\n proximity_levels = np.array(proximity_levels)\n\n # Step 6\n weights = np.array([np.exp(-self.theta*(proximity_levels[i] - 1))\n for i in range(len(proximity_levels))])\n # weights is the probability distribution of sampling in the\n # clusters identified\n weights = weights/np.sum(weights)\n\n suitable = False\n for i in range(len(weights)):\n if weights[i] > 0 and len(Ps[i]) > 1:\n suitable = True\n\n if not suitable:\n return X.copy(), y.copy()\n\n # do the sampling, from each cluster proportionally to the distribution\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(weights)), p=weights)\n if len(Ps[cluster_idx]) > 1:\n random_idx1, random_idx2 = self.random_state.choice(\n Ps[cluster_idx], 2, replace=False)\n samples.append(self.sample_between_points(\n X[random_idx1], X[random_idx2]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'L': self.L,\n 'theta': self.theta,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SL_graph_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{sl_graph_smote,\n author = {Bunkhumpornpat,\n Chumpol and Subpaiboonkit, Sitthichoke},\n booktitle= {13th International Symposium on Communications\n and Information Technologies},\n year = {2013},\n month = {09},\n pages = {570-575},\n title = {Safe level graph for synthetic minority\n over-sampling techniques},\n isbn = {978-1-4673-5578-0}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X[y == self.min_label])\n\n # Computing safe level values\n safe_level_values = np.array(\n [np.sum(y[i] == self.min_label) for i in indices])\n\n # Computing skewness\n skewness = skew(safe_level_values)\n\n if skewness < 0:\n # left skewed\n s = Safe_Level_SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n else:\n # right skewed\n s = Borderline_SMOTE1(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n return s.sample(X, y)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NRSBoundary_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{nrsboundary_smote,\n author= {Feng, Hu and Hang, Li},\n title= {A Novel Boundary Oversampling Algorithm Based on\n Neighborhood Rough Set Model: NRSBoundary-SMOTE},\n journal= {Mathematical Problems in Engineering},\n year= {2013},\n pages= {10},\n doi= {10.1155/2013/694809},\n url= {http://dx.doi.org/10.1155/694809}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n w=0.005,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n w (float): used to set neighborhood radius\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(w, \"w\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.w = w\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'w': [0.005, 0.01, 0.05]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # step 1\n bound_set = []\n pos_set = []\n\n # step 2\n X_min_indices = np.where(y == self.min_label)[0]\n X_min = X[X_min_indices]\n\n # step 3\n dm = pairwise_distances(X, X)\n d_max = np.max(dm, axis=1)\n max_dist = np.max(dm)\n np.fill_diagonal(dm, max_dist)\n d_min = np.min(dm, axis=1)\n\n delta = d_min + self.w*(d_max - d_min)\n\n # number of neighbors is not interesting here, as we use the\n # radius_neighbors function to extract the neighbors in a given radius\n n_neighbors = min([self.n_neighbors + 1, len(X)])\n nn = NearestNeighbors(n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n for i in range(len(X)):\n indices = nn.radius_neighbors(X[i].reshape(1, -1),\n delta[i],\n return_distance=False)\n\n n_minority = np.sum(y[indices[0]] == self.min_label)\n n_majority = np.sum(y[indices[0]] == self.maj_label)\n if y[i] == self.min_label and not n_minority == len(indices[0]):\n bound_set.append(i)\n elif y[i] == self.maj_label and n_majority == len(indices[0]):\n pos_set.append(i)\n\n bound_set = np.array(bound_set)\n pos_set = np.array(pos_set)\n\n if len(pos_set) == 0 or len(bound_set) == 0:\n return X.copy(), y.copy()\n\n # step 4 and 5\n # computing the nearest neighbors of the bound set from the\n # minority set\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X[bound_set])\n\n # do the sampling\n samples = []\n trials = 0\n w = self.w\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(len(bound_set))\n random_neighbor_idx = 
self.random_state.choice(indices[idx][1:])\n x_new = self.sample_between_points(\n X[bound_set[idx]], X_min[random_neighbor_idx])\n\n # checking the conflict\n dist_from_pos_set = np.linalg.norm(X[pos_set] - x_new, axis=1)\n if np.all(dist_from_pos_set > delta[pos_set]):\n # no conflict\n samples.append(x_new)\n trials = trials + 1\n if trials > 1000 and len(samples) == 0:\n trials = 0\n w = w*0.9\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'w': self.w,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LVQ_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{lvq_smote,\n title={LVQ-SMOTE – Learning Vector Quantization\n based Synthetic Minority Over–sampling\n Technique for biomedical data},\n author={Munehiro Nakamura and Yusuke Kajiwara\n and Atsushi Otsuka and Haruhiko Kimura},\n booktitle={BioData Mining},\n year={2013}\n }\n\n Notes:\n * This implementation is only a rough approximation of the method\n described in the paper. The main problem is that the paper uses\n many datasets to find similar patterns in the codebooks and\n replicate patterns appearing in other datasets to the imbalanced\n datasets based on their relative position compared to the codebook\n elements. What we do is clustering the minority class to extract\n a codebook as kmeans cluster means, then, find pairs of codebook\n elements which have the most similar relative position to a\n randomly selected pair of codebook elements, and translate nearby\n minority samples from the neighborhood one pair of codebook\n elements to the neighborood of another pair of codebook elements.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clusters=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_clusters (int): number of clusters in vector quantization\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 3)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [4, 8, 12]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # clustering X_min to extract codebook\n n_clusters = min([len(X_min), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n codebook = kmeans.cluster_centers_\n\n # get nearest neighbors of minority samples to codebook samples\n n_neighbors = min([len(X_min), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(codebook)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # randomly selecting a pair of codebook elements\n cb_0, cb_1 = self.random_state.choice(\n list(range(len(codebook))), 2, replace=False)\n diff = codebook[cb_0] - codebook[cb_1]\n min_dist = np.inf\n min_0 = None\n # finding another pair of codebook elements with similar offset\n for i in range(len(codebook)):\n for j in range(len(codebook)):\n if cb_0 != i and cb_0 != j and cb_1 != i and cb_1 != j:\n dd = np.linalg.norm(diff - (codebook[i] - codebook[j]))\n if dd < min_dist:\n min_dist = dd\n min_0 = self.random_state.choice([i, j])\n\n # translating a random neighbor of codebook element min_0 to\n # the neighborhood of point_0\n random_index = self.random_state.randint(len(indices[min_0]))\n sample = X_min[indices[min_0][random_index]]\n point_0 = codebook[cb_0] + (sample - codebook[min_0])\n\n samples.append(point_0)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the 
current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SOI_CJ(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{soi_cj,\n author = {Sánchez, Atlántida I. and Morales, Eduardo and\n Gonzalez, Jesus},\n year = {2013},\n month = {01},\n pages = {},\n title = {Synthetic Oversampling of Instances Using\n Clustering},\n volume = {22},\n booktitle = {International Journal of Artificial\n Intelligence Tools}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n method='interpolation',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of nearest neighbors in the SMOTE\n sampling\n method (str): 'interpolation'/'jittering'\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_isin(method, 'method', ['interpolation', 'jittering'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'method': ['interpolation', 'jittering']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def clustering(self, X, y):\n \"\"\"\n Implementation of the clustering technique described in the paper.\n\n Args:\n X (np.matrix): array of training instances\n y (np.array): target labels\n\n Returns:\n list(set): list of minority clusters\n \"\"\"\n nn_all = NearestNeighbors(n_jobs=self.n_jobs)\n nn_all.fit(X)\n\n X_min = X[y == self.min_label]\n\n # extract nearest neighbors of all samples from the set of\n # minority samples\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # initialize clusters by minority samples\n clusters = []\n for i in range(len(X_min)):\n # empty cluster added\n clusters.append(set())\n # while the closest instance is from the minority class, adding it\n # to the cluster\n for j in indices[i]:\n if y[j] == self.min_label:\n clusters[i].add(j)\n else:\n break\n\n # cluster merging phase\n is_intersection = True\n while is_intersection:\n is_intersection = False\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n # computing intersection\n intersection = clusters[i].intersection(clusters[j])\n if len(intersection) > 0:\n is_intersection = True\n # computing distance matrix\n dm = pairwise_distances(\n X[list(clusters[i])], X[list(clusters[j])])\n # largest distance\n max_dist_pair = np.where(dm == np.max(dm))\n # elements with 
the largest distance\n max_i = X[list(clusters[i])[max_dist_pair[0][0]]]\n max_j = X[list(clusters[j])[max_dist_pair[1][0]]]\n\n # finding midpoint and radius\n mid_point = (max_i + max_j)/2.0\n radius = np.linalg.norm(mid_point - max_i)\n\n # extracting points within the hypersphare of\n # radius \"radius\"\n mid_point_reshaped = mid_point.reshape(1, -1)\n ind = nn_all.radius_neighbors(mid_point_reshaped,\n radius,\n return_distance=False)\n\n n_min = np.sum(y[ind[0]] == self.min_label)\n if n_min > len(ind[0])/2:\n # if most of the covered elements come from the\n # minority class, merge clusters\n clusters[i].update(clusters[j])\n clusters[j] = set()\n else:\n # otherwise move the difference to the\n # bigger cluster\n if len(clusters[i]) > len(clusters[j]):\n clusters[j].difference_update(intersection)\n else:\n clusters[i].difference_update(intersection)\n\n # returning non-empty clusters\n return [c for c in clusters if len(c) > 0]\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n std_min = np.std(X_min, axis=0)\n\n # do the clustering\n _logger.info(self.__class__.__name__ + \": \" + \"Executing clustering\")\n clusters = self.clustering(X, y)\n\n # filtering the clusters, at least two points in a cluster are needed\n # for both interpolation and jittering (due to the standard deviation)\n clusters_filtered = [list(c) for c in clusters if len(c) > 2]\n\n if len(clusters_filtered) > 0:\n # if there are clusters having at least 2 elements, do the sampling\n cluster_nums = [len(c) for c in clusters_filtered]\n cluster_weights = cluster_nums/np.sum(cluster_nums)\n cluster_stds = [np.std(X[clusters_filtered[i]], axis=0)\n for i in range(len(clusters_filtered))]\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Executing sample generation\")\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(clusters_filtered)), p=cluster_weights)\n if self.method == 'interpolation':\n clust = clusters_filtered[cluster_idx]\n idx_0, idx_1 = self.random_state.choice(clust,\n 2,\n replace=False)\n X_0, X_1 = X[idx_0], X[idx_1]\n samples.append(\n self.sample_between_points_componentwise(X_0, X_1))\n elif self.method == 'jittering':\n clust_std = cluster_stds[cluster_idx]\n std = np.min(np.vstack([std_min, clust_std]), axis=0)\n clust = clusters_filtered[cluster_idx]\n idx = self.random_state.choice(clust)\n X_samp = self.sample_by_jittering_componentwise(X[idx],\n std)\n samples.append(X_samp)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.array([self.min_label]*len(samples))]))\n else:\n # otherwise fall back to standard smote\n _logger.warning(self.__class__.__name__ + \": \" +\n \"No clusters with more than 2 elements\")\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': 
self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ROSE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{rose,\n author=\"Menardi, Giovanna\n and Torelli, Nicola\",\n title=\"Training and assessing classification rules with\n imbalanced data\",\n journal=\"Data Mining and Knowledge Discovery\",\n year=\"2014\",\n month=\"Jan\",\n day=\"01\",\n volume=\"28\",\n number=\"1\",\n pages=\"92--122\",\n issn=\"1573-756X\",\n doi=\"10.1007/s10618-012-0295-5\",\n url=\"https://doi.org/10.1007/s10618-012-0295-5\"\n }\n\n Notes:\n * It is not entirely clear if the authors propose kernel density\n estimation or the fitting of simple multivariate Gaussians\n on the minority samples. The latter seems to be more likely,\n I implement that approach.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self, proportion=1.0, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0.0)\n\n self.proportion = proportion\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # Estimating the H matrix\n std = np.std(X_min, axis=0)\n d = len(X[0])\n n = len(X_min)\n H = std*(4.0/((d + 1)*n))**(1.0/(d + 4))\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.randint(len(X_min))\n samples.append(self.sample_by_gaussian_jittering(\n X_min[random_idx], H))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_OUT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE: An\n enhancement strategy to handle imbalance in\n data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science 
and Information System},\n year={2014},\n pages={280-284}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # nearest neighbors among minority points\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min)\n\n min_distances, min_indices = nn_min.kneighbors(X_min)\n # nearest neighbors among majority points\n n_neighbors = min([len(X_maj), self.n_neighbors+1])\n nn_maj = NearestNeighbors(\n n_neighbors=n_neighbors, n_jobs=self.n_jobs).fit(X_maj)\n maj_distances, maj_indices = nn_maj.kneighbors(X_min)\n\n # generate samples\n samples = []\n for _ in range(n_to_sample):\n # implementation of Algorithm 1 in the paper\n random_idx = self.random_state.choice(\n np.arange(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n v = X_maj[self.random_state.choice(maj_indices[random_idx])]\n dif1 = u - v\n uu = u + self.random_state.random_sample()*0.3*dif1\n x = X_min[self.random_state.choice(min_indices[random_idx][1:])]\n dif2 = uu - x\n w = x + self.random_state.random_sample()*0.5*dif2\n\n samples.append(w)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': 
self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_Cosine(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE:\n An enhancement strategy to handle imbalance\n in data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science and Information System},\n year={2014},\n pages={280-284}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # Fitting the nearest neighbors models to the minority and\n # majority data using two different metrics for the minority\n nn_min_euc = NearestNeighbors(n_neighbors=len(X_min),\n n_jobs=self.n_jobs)\n nn_min_euc.fit(X_min)\n nn_min_euc_dist, nn_min_euc_ind = nn_min_euc.kneighbors(X_min)\n\n nn_min_cos = NearestNeighbors(n_neighbors=len(X_min),\n metric='cosine',\n n_jobs=self.n_jobs)\n nn_min_cos.fit(X_min)\n nn_min_cos_dist, nn_min_cos_ind = nn_min_cos.kneighbors(X_min)\n\n nn_maj = NearestNeighbors(n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs)\n nn_maj.fit(X_maj)\n nn_maj_dist, nn_maj_ind = nn_maj.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.choice(\n np.arange(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n # get the rank of each minority sample according to their distance\n # from u\n to_sort_euc = zip(\n 
nn_min_euc_ind[random_idx], np.arange(len(X_min)))\n _, sorted_by_euc_ind = zip(*(sorted(to_sort_euc,\n key=lambda x: x[0])))\n to_sort_cos = zip(\n nn_min_cos_ind[random_idx], np.arange(len(X_min)))\n _, sorted_by_cos_ind = zip(*(sorted(to_sort_cos,\n key=lambda x: x[0])))\n # adding the ranks to get the composite similarity measure (called\n # voting in the paper)\n ranked_min_indices = sorted_by_euc_ind + sorted_by_cos_ind\n # sorting the ranking\n to_sort = zip(ranked_min_indices, np.arange(len(X_min)))\n _, sorted_ranking = zip(*(sorted(to_sort, key=lambda x: x[0])))\n # get the indices of the n_neighbors nearest neighbors according\n # to the composite metrics\n min_indices = sorted_ranking[1:(self.n_neighbors + 1)]\n\n v = X_maj[self.random_state.choice(nn_maj_ind[random_idx])]\n dif1 = u - v\n uu = u + self.random_state.random_sample()*0.3*dif1\n x = X_min[self.random_state.choice(min_indices[1:])]\n dif2 = uu - x\n w = x + self.random_state.random_sample()*0.5*dif2\n samples.append(w)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Selected_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE: An\n enhancement strategy to handle imbalance in\n data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science and Information System},\n year={2014},\n pages={280-284}\n }\n\n Notes:\n * Significant attribute selection was not described in the paper,\n therefore we have implemented something meaningful.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n perc_sign_attr=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n perc_sign_attr (float): [0,1] - percentage of significant\n attributes\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_in_range(perc_sign_attr, 'perc_sign_attr', [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.perc_sign_attr = perc_sign_attr\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'perc_sign_attr': [0.3, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn_min_euc = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min)\n\n nn_min_dist, nn_min_ind = nn_min_euc.kneighbors(X_min)\n\n # significant attribute selection was not described in the paper\n # I have implemented significant attribute selection by checking\n # the overlap between ranges of minority and majority class attributes\n # the attributes with bigger overlap respecting their ranges\n # are considered more significant\n min_ranges_a = np.min(X_min, axis=0)\n min_ranges_b = np.max(X_min, axis=0)\n maj_ranges_a = np.min(X_maj, axis=0)\n maj_ranges_b = np.max(X_maj, axis=0)\n\n # end points of overlaps\n max_a = np.max(np.vstack([min_ranges_a, maj_ranges_a]), axis=0)\n min_b = np.min(np.vstack([min_ranges_b, maj_ranges_b]), axis=0)\n\n # size of overlap\n overlap = min_b - max_a\n\n # replacing negative values (no overlap) by zero\n overlap = np.where(overlap < 0, 0, overlap)\n # percentage of overlap compared to the ranges of attributes in the\n # minority set\n percentages = overlap/(min_ranges_b - min_ranges_a)\n # fixing zero division if some attributes have zero range\n percentages = np.nan_to_num(percentages)\n # number of significant attributes to determine\n num_sign_attr = min(\n [1, int(np.rint(self.perc_sign_attr*len(percentages)))])\n\n significant_attr = (percentages >= sorted(\n percentages)[-num_sign_attr]).astype(int)\n\n samples = 
[]\n for _ in range(n_to_sample):\n random_idx = self.random_state.choice(range(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n v = X_min[self.random_state.choice(nn_min_ind[random_idx][1:])]\n samples.append(self.sample_between_points_componentwise(\n u, v, significant_attr))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'perc_sign_attr': self.perc_sign_attr,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LN_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ln_smote,\n author={Maciejewski, T. and Stefanowski, J.},\n booktitle={2011 IEEE Symposium on Computational\n Intelligence and Data Mining (CIDM)},\n title={Local neighbourhood extension of SMOTE for\n mining imbalanced data},\n year={2011},\n volume={},\n number={},\n pages={104-111},\n keywords={Bayes methods;data mining;pattern\n classification;local neighbourhood\n extension;imbalanced data mining;\n focused resampling technique;SMOTE\n over-sampling method;naive Bayes\n classifiers;Noise measurement;Noise;\n Decision trees;Breast cancer;\n Sensitivity;Data mining;Training},\n doi={10.1109/CIDM.2011.5949434},\n ISSN={},\n month={April}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n if self.n_neighbors + 2 > len(X):\n n_neighbors = len(X) - 2\n else:\n 
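            # descriptive note (added): enough training points are available,
            # so the user-specified neighborhood size can be used unchanged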
n_neighbors = self.n_neighbors\n\n if n_neighbors < 2:\n return X.copy(), y.copy()\n\n # nearest neighbors of each instance to each instance in the dataset\n nn = NearestNeighbors(n_neighbors=n_neighbors + 2, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # dimensionality\n d = len(X[0])\n\n def safe_level(p_idx, n_idx=None):\n \"\"\"\n computing the safe level of samples\n\n Args:\n p_idx (int): index of positive sample\n n_idx (int): index of other sample\n\n Returns:\n int: safe level\n \"\"\"\n if n_idx is None:\n # implementation for 1 sample only\n return np.sum(y[indices[p_idx][1:-1]] == self.min_label)\n else:\n # implementation for 2 samples\n if ((not y[n_idx] != self.maj_label)\n and p_idx in indices[n_idx][1:-1]):\n # -1 because p_idx will be replaced\n n_positives = np.sum(\n y[indices[n_idx][1:-1]] == self.min_label) - 1\n if y[indices[n_idx][-1]] == self.min_label:\n # this is the effect of replacing p_idx by the next\n # (k+1)th neighbor\n n_positives = n_positives + 1\n return n_positives\n return np.sum(y[indices[n_idx][1:-1]] == self.min_label)\n\n def random_gap(slp, sln, n_label):\n \"\"\"\n determining random gap\n\n Args:\n slp (int): safe level of p\n sln (int): safe level of n\n n_label (int): label of n\n\n Returns:\n float: gap\n \"\"\"\n delta = 0\n if sln == 0 and slp > 0:\n return delta\n else:\n sl_ratio = slp/sln\n if sl_ratio == 1:\n delta = self.random_state.random_sample()\n elif sl_ratio > 1:\n delta = self.random_state.random_sample()/sl_ratio\n else:\n delta = 1.0 - self.random_state.random_sample()*sl_ratio\n if not n_label == self.min_label:\n delta = delta*sln/(n_neighbors)\n return delta\n\n # generating samples\n trials = 0\n samples = []\n while len(samples) < n_to_sample:\n p_idx = self.random_state.choice(minority_indices)\n # extract random neighbor of p\n n_idx = self.random_state.choice(indices[p_idx][1:-1])\n\n # checking can-create criteria\n slp = safe_level(p_idx)\n sln = safe_level(p_idx, n_idx)\n\n if (not slp == 0) or (not sln == 0):\n # can create\n p = X[p_idx]\n n = X[n_idx]\n x_new = p.copy()\n\n for a in range(d):\n delta = random_gap(slp, sln, y[n_idx])\n diff = n[a] - p[a]\n x_new[a] = p[a] + delta*diff\n samples.append(x_new)\n\n trials = trials + 1\n if len(samples)/trials < 1.0/n_to_sample:\n _logger.info(self.__class__.__name__ + \": \" +\n \"no instances with slp > 0 and sln > 0 found\")\n return X.copy(), y.copy()\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MWMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @ARTICLE{mwmote,\n author={Barua, S. and Islam, M. M. and Yao, X. 
and\n Murase, K.},\n journal={IEEE Transactions on Knowledge and Data\n Engineering},\n title={MWMOTE--Majority Weighted Minority Oversampling\n Technique for Imbalanced Data Set Learning},\n year={2014},\n volume={26},\n number={2},\n pages={405-425},\n keywords={learning (artificial intelligence);pattern\n clustering;sampling methods;AUC;area under\n curve;ROC;receiver operating curve;G-mean;\n geometric mean;minority class cluster;\n clustering approach;weighted informative\n minority class samples;Euclidean distance;\n hard-to-learn informative minority class\n samples;majority class;synthetic minority\n class samples;synthetic oversampling\n methods;imbalanced learning problems;\n imbalanced data set learning;\n MWMOTE-majority weighted minority\n oversampling technique;Sampling methods;\n Noise measurement;Boosting;Simulation;\n Complexity theory;Interpolation;Abstracts;\n Imbalanced learning;undersampling;\n oversampling;synthetic sample generation;\n clustering},\n doi={10.1109/TKDE.2012.232},\n ISSN={1041-4347},\n month={Feb}}\n\n Notes:\n * The original method was not prepared for the case of having clusters\n of 1 elements.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n k1=5,\n k2=5,\n k3=5,\n M=10,\n cf_th=5.0,\n cmax=10.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n k1 (int): parameter of the NearestNeighbors component\n k2 (int): parameter of the NearestNeighbors component\n k3 (int): parameter of the NearestNeighbors component\n M (int): number of clusters\n cf_th (float): cutoff threshold\n cmax (float): maximum closeness value\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(k1, 'k1', 1)\n self.check_greater_or_equal(k2, 'k2', 1)\n self.check_greater_or_equal(k3, 'k3', 1)\n self.check_greater_or_equal(M, 'M', 1)\n self.check_greater_or_equal(cf_th, 'cf_th', 0)\n self.check_greater_or_equal(cmax, 'cmax', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k1 = k1\n self.k2 = k2\n self.k3 = k3\n self.M = M\n self.cf_th = cf_th\n self.cmax = cmax\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k1': [5, 9],\n 'k2': [5, 9],\n 'k3': [5, 9],\n 'M': [4, 10],\n 'cf_th': [5.0],\n 'cmax': [10.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = 
self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority = np.where(y == self.min_label)[0]\n\n # Step 1\n n_neighbors = min([len(X), self.k1 + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(X)\n dist1, ind1 = nn.kneighbors(X)\n\n # Step 2\n arr = [i for i in minority if np.sum(y[ind1[i][1:]] == self.min_label)]\n filtered_minority = np.array(arr)\n\n if len(filtered_minority) == 0:\n _logger.info(self.__class__.__name__ + \": \" +\n \"filtered_minority array is empty\")\n return X.copy(), y.copy()\n\n # Step 3 - ind2 needs to be indexed by indices of the lengh of X_maj\n nn_maj = NearestNeighbors(n_neighbors=self.k2, n_jobs=self.n_jobs)\n nn_maj.fit(X_maj)\n dist2, ind2 = nn_maj.kneighbors(X[filtered_minority])\n\n # Step 4\n border_majority = np.unique(ind2.flatten())\n\n # Step 5 - ind3 needs to be indexed by indices of the length of X_min\n n_neighbors = min([self.k3, len(X_min)])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist3, ind3 = nn_min.kneighbors(X_maj[border_majority])\n\n # Step 6 - informative minority indexes X_min\n informative_minority = np.unique(ind3.flatten())\n\n def closeness_factor(y, x, cf_th=self.cf_th, cmax=self.cmax):\n \"\"\"\n Closeness factor according to the Eq (6)\n\n Args:\n y (np.array): training instance (border_majority)\n x (np.array): training instance (informative_minority)\n cf_th (float): cutoff threshold\n cmax (float): maximum values\n\n Returns:\n float: closeness factor\n \"\"\"\n d = np.linalg.norm(y - x)/len(y)\n if d == 0.0:\n d = 0.1\n if 1.0/d < cf_th:\n f = 1.0/d\n else:\n f = cf_th\n return f/cf_th*cmax\n\n # Steps 7 - 9\n _logger.info(self.__class__.__name__ + \": \" +\n 'computing closeness factors')\n closeness_factors = np.zeros(\n shape=(len(border_majority), len(informative_minority)))\n for i in range(len(border_majority)):\n bm_i = border_majority[i]\n for j in range(len(informative_minority)):\n im_j = informative_minority[j]\n closeness_factors[i, j] = closeness_factor(X_maj[bm_i],\n X_min[im_j])\n\n _logger.info(self.__class__.__name__ + \": \" +\n 'computing information weights')\n information_weights = np.zeros(\n shape=(len(border_majority), len(informative_minority)))\n for i in range(len(border_majority)):\n norm_factor = np.sum(closeness_factors[i, :])\n for j in range(len(informative_minority)):\n cf_ij = closeness_factors[i, j]\n information_weights[i, j] = cf_ij**2/norm_factor\n\n selection_weights = np.sum(information_weights, axis=0)\n selection_probabilities = selection_weights/np.sum(selection_weights)\n\n # Step 10\n _logger.info(self.__class__.__name__ + \": \" + 'do clustering')\n n_clusters = min([len(X_min), self.M])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n imin_labels = kmeans.labels_[informative_minority]\n\n clusters = [np.where(imin_labels == i)[0]\n for i in range(np.max(kmeans.labels_)+1)]\n\n # Step 11\n samples = []\n\n # Step 12\n for i in range(n_to_sample):\n random_index = self.random_state.choice(informative_minority,\n p=selection_probabilities)\n cluster_label = kmeans.labels_[random_index]\n cluster = clusters[cluster_label]\n random_index_in_cluster = self.random_state.choice(cluster)\n 
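            # descriptive note (added): the new sample is generated by
            # interpolating between the probabilistically selected informative
            # minority point and a random member of its own k-means cluster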
X_random = X_min[random_index]\n X_random_cluster = X_min[random_index_in_cluster]\n samples.append(self.sample_between_points(X_random,\n X_random_cluster))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k1': self.k1,\n 'k2': self.k2,\n 'k3': self.k3,\n 'M': self.M,\n 'cf_th': self.cf_th,\n 'cmax': self.cmax,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass PDFOS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{pdfos,\n title = \"PDFOS: PDF estimation based over-sampling for\n imbalanced two-class problems\",\n journal = \"Neurocomputing\",\n volume = \"138\",\n pages = \"248 - 259\",\n year = \"2014\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2014.02.006\",\n author = \"Ming Gao and Xia Hong and Sheng Chen and Chris\n J. Harris and Emad Khalaf\",\n keywords = \"Imbalanced classification, Probability density\n function based over-sampling, Radial basis\n function classifier, Orthogonal forward\n selection, Particle swarm optimisation\"\n }\n\n Notes:\n * Not prepared for low-rank data.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_density_estimation]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def _sample_by_kernel_density_estimation(self,\n X,\n n_to_sample,\n n_optimize=100):\n \"\"\"\n Sample n_to_sample instances by kernel density estimation\n\n Args:\n X_min (np.array): minority data\n n_to_sample (int): number of instances to sample\n n_optimize (int): number of vectors used for the optimization\n process\n \"\"\"\n # dimensionality of the data\n m = len(X[0])\n\n # computing the covariance matrix of the data\n S = np.cov(X, rowvar=False)\n message = \"Condition number of covariance matrix: %f\"\n message = message % np.linalg.cond(S)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n message = \"Inputs size: %d\" % len(X)\n _logger.info(self.__class__.__name__ + \": \" + message)\n _logger.info(self.__class__.__name__ + \": \" + \"Input dim: %d\" % m)\n\n S_mrank = np.linalg.matrix_rank(S, tol=1e-2)\n message = \"Matrix rank of covariance matrix: %d\" % S_mrank\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # checking the rank of the matrix\n if S_mrank < m:\n message = \"The covariance matrix is singular, fixing it by PCA\"\n _logger.info(self.__class__.__name__ + \": \" + 
message)\n message = \"dim: %d, rank: %d, size: %d\" % (m, S_mrank, len(X))\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n n_components = max([min([S_mrank, len(X)])-1, 2])\n if n_components == len(X[0]):\n return X.copy()\n\n pca = PCA(n_components=n_components)\n X_low_dim = pca.fit_transform(X)\n X_samp = self._sample_by_kernel_density_estimation(\n X_low_dim, n_to_sample, n_optimize)\n return pca.inverse_transform(X_samp)\n\n S_inv = np.linalg.inv(S)\n det = np.linalg.det(S)\n\n _logger.info(self.__class__.__name__ + \": \" + \"Determinant: %f\" % det)\n\n def eq_9(i, j, sigma, X):\n \"\"\"\n Eq (9) in the paper\n \"\"\"\n tmp = np.dot(np.dot((X[j] - X[i]), S_inv), (X[j] - X[i]))\n numerator = (np.sqrt(2)*sigma)**(-m)*np.exp(-(1/(4*sigma**2))*tmp)\n denominator = ((2*np.pi)**(m/2))\n return numerator/denominator\n\n def eq_5(i, j, sigma, X):\n \"\"\"\n Eq (5) in the paper\n \"\"\"\n tmp = np.dot(np.dot((X[j] - X[i]), S_inv), (X[j] - X[i]))\n numerator = sigma**(-m)*np.exp(-(1/(2*sigma**2))*tmp)\n denominator = ((2.0*np.pi)**(m/2))\n return numerator/denominator\n\n def eq_5_0(sigma, X):\n \"\"\"\n Eq (5) with the same vectors feeded in\n \"\"\"\n return sigma**(-m)/((2.0*np.pi)**(m/2))\n\n def eq_8(i, j, sigma, X):\n \"\"\"\n Eq (8) in the paper\n \"\"\"\n e9 = eq_9(i, j, sigma, X)\n e5 = eq_5(i, j, sigma, X)\n return e9 - 2*e5\n\n def M(sigma, X):\n \"\"\"\n Eq (7) in the paper\n \"\"\"\n total = 0.0\n for i in range(len(X)):\n for j in range(len(X)):\n total = total + eq_8(i, j, sigma, X)\n\n a = total/len(X)**2\n b = 2.0*eq_5_0(sigma, X)/len(X)\n return a + b\n\n # finding the best sigma parameter\n best_sigma = 0\n error = np.inf\n # the dataset is reduced to make the optimization more efficient\n domain = range(len(X))\n n_to_choose = min([len(X), n_optimize])\n X_reduced = X[self.random_state.choice(domain,\n n_to_choose,\n replace=False)]\n\n # we suppose that the data is normalized, thus, this search space\n # should be meaningful\n for sigma in np.logspace(-5, 2, num=20):\n e = M(sigma, X_reduced)\n if e < error:\n error = e\n best_sigma = sigma\n _logger.info(self.__class__.__name__ + \": \" +\n \"best sigma found: %f\" % best_sigma)\n\n # generating samples according to the\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X))\n samples.append(self.random_state.multivariate_normal(\n X[idx], best_sigma*S))\n\n return np.vstack(samples)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # scaling the data to aid numerical stability\n ss = StandardScaler()\n X_ss = ss.fit_transform(X)\n\n X_min = X_ss[y == self.min_label]\n\n # generating samples by kernel density estimation\n samples = self._sample_by_kernel_density_estimation(X_min,\n n_to_sample,\n n_optimize=100)\n\n return (np.vstack([X, ss.inverse_transform(samples)]),\n 
np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass IPADE_ID(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ipade_id,\n title = \"Addressing imbalanced classification with\n instance generation techniques: IPADE-ID\",\n journal = \"Neurocomputing\",\n volume = \"126\",\n pages = \"15 - 28\",\n year = \"2014\",\n note = \"Recent trends in Intelligent Data Analysis Online\n Data Processing\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2013.01.050\",\n author = \"Victoria López and Isaac Triguero and Cristóbal\n J. Carmona and Salvador García and\n Francisco Herrera\",\n keywords = \"Differential evolution, Instance generation,\n Nearest neighbor, Decision tree, Imbalanced\n datasets\"\n }\n\n Notes:\n * According to the algorithm, if the addition of a majority sample\n doesn't improve the AUC during the DE optimization process,\n the addition of no further majority points is tried.\n * In the differential evolution the multiplication by a random number\n seems have a deteriorating effect, new scaling parameter added to\n fix this.\n * It is not specified how to do the evaluation.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n F=0.1,\n G=0.1,\n OT=20,\n max_it=40,\n dt_classifier=DecisionTreeClassifier(random_state=2),\n base_classifier=DecisionTreeClassifier(random_state=2),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n F (float): control parameter of differential evolution\n G (float): control parameter of the evolution\n OT (int): number of optimizations\n max_it (int): maximum number of iterations for DE_optimization\n dt_classifier (obj): decision tree classifier object\n base_classifier (obj): classifier object\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater(F, 'F', 0)\n self.check_greater(G, 'G', 0)\n self.check_greater(OT, 'OT', 0)\n self.check_greater(max_it, 'max_it', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.F = F\n self.G = G\n self.OT = OT\n self.max_it = max_it\n self.dt_classifier = dt_classifier\n self.base_classifier = base_classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the OT and max_it parameters control the discovery of the feature\n # space it is enough to try sufficiently large numbers\n dt_classifiers = [DecisionTreeClassifier(random_state=2)]\n base_classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'F': [0.1, 0.2],\n 'G': [0.1, 0.2],\n 'OT': [30],\n 'max_it': [40],\n 'dt_classifier': dt_classifiers,\n 'base_classifier': base_classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and 
target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n mms = MinMaxScaler()\n X = mms.fit_transform(X)\n\n min_indices = np.where(y == self.min_label)[0]\n maj_indices = np.where(y == self.maj_label)[0]\n\n def DE_optimization(GS,\n GS_y,\n X,\n y,\n min_indices,\n maj_indices,\n classifier,\n for_validation):\n \"\"\"\n Implements the DE_optimization method of the paper.\n\n Args:\n GS (np.matrix): actual best training set\n GS_y (np.array): corresponding class labels\n X (np.matrix): complete training set\n y (np.array): all class labels\n min_indices (np.array): array of minority class labels in y\n maj_indices (np.array): array of majority class labels in y\n classifier (object): base classifier\n for_validation (np.array): array of indices for X used for\n validation\n\n Returns:\n np.matrix: optimized training set\n \"\"\"\n # evaluate training set\n AUC_GS = evaluate_ID(\n GS, GS_y, X[for_validation], y[for_validation], classifier)\n\n # optimizing the training set\n for _ in range(self.max_it):\n GS_hat = []\n # doing the differential evolution\n for i in range(len(GS)):\n if GS_y[i] == self.min_label:\n r1, r2, r3 = self.random_state.choice(min_indices,\n 3,\n replace=False)\n else:\n r1, r2, r3 = self.random_state.choice(maj_indices,\n 3,\n replace=False)\n\n random_value = self.random_state.random_sample()\n force_G = X[r1] - X[i]\n force_F = X[r2] - X[r3]\n value = GS[i] + self.G*random_value * \\\n force_G + self.F*force_F\n GS_hat.append(np.clip(value, 0.0, 1.0))\n\n # evaluating the current setting\n AUC_GS_hat = evaluate_ID(GS_hat,\n GS_y,\n X[for_validation],\n y[for_validation],\n classifier)\n\n if AUC_GS_hat > AUC_GS:\n GS = GS_hat\n AUC_GS = AUC_GS_hat\n\n return GS\n\n def evaluate_ID(GS, GS_y, TR, TR_y, base_classifier):\n \"\"\"\n Implements the evaluate_ID function of the paper.\n\n Args:\n GS (np.matrix): actual training set\n GS_y (np.array): list of corresponding class labels\n TR (np.matrix): complete training set\n TR_y (np.array): all class labels\n base_classifier (object): classifier to be used\n\n Returns:\n float: ROC AUC score\n \"\"\"\n base_classifier.fit(GS, GS_y)\n pred = base_classifier.predict_proba(TR)[:, np.where(\n base_classifier.classes_ == self.min_label)[0][0]]\n if len(np.unique(TR_y)) != 2:\n return 0.0\n return roc_auc_score(TR_y, pred)\n\n def evaluate_class(GS, GS_y, TR, TR_y, base_classifier):\n \"\"\"\n Implements the evaluate_ID function of the paper.\n\n Args:\n GS (np.matrix): actual training set\n GS_y (np.array): list of corresponding class labels\n TR (np.matrix): complete training set\n TR_y (np.array): all class labels\n base_classifier (object): classifier to be used\n\n Returns:\n float: accuracy score\n \"\"\"\n base_classifier.fit(GS, GS_y)\n pred = base_classifier.predict(TR)\n return accuracy_score(TR_y, pred)\n\n # Phase 1: Initialization\n _logger.info(self.__class__.__name__ + \": \" + \"Initialization\")\n self.dt_classifier.fit(X, y)\n leafs = self.dt_classifier.apply(X)\n unique_leafs = np.unique(leafs)\n used_in_GS = np.repeat(False, len(X))\n for_validation = np.where(np.logical_not(used_in_GS))[0]\n\n # extracting mean elements of the leafs\n GS = []\n GS_y = []\n for u in unique_leafs:\n indices = np.where(leafs == u)[0]\n GS.append(np.mean(X[indices], axis=0))\n GS_y.append(mode(y[indices]))\n if len(indices) == 1:\n 
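                # descriptive note (added): a leaf holding a single training
                # point contributes exactly that point to GS, so it is marked
                # and later excluded from the validation indices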
used_in_GS[indices[0]] = True\n\n # updating the indices of the validation set excluding those used in GS\n for_validation = np.where(np.logical_not(used_in_GS))[0]\n _logger.info(self.__class__.__name__ + \": \" +\n \"Size of validation set %d\" % len(for_validation))\n if len(np.unique(y[for_validation])) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in validation set\")\n return X.copy(), y.copy()\n if len(np.unique(GS_y)) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in reduced dataset\")\n return X.copy(), y.copy()\n\n # DE optimization takes place\n _logger.info(self.__class__.__name__ + \": \" + \"DE optimization\")\n base_classifier = self.base_classifier.__class__(\n **(self.base_classifier.get_params()))\n GS = DE_optimization(GS, GS_y, X, y, min_indices,\n maj_indices, base_classifier, for_validation)\n # evaluate results\n base_classifier = self.base_classifier.__class__(\n **(self.base_classifier.get_params()))\n AUC = evaluate_ID(GS, GS_y, X[for_validation],\n y[for_validation], base_classifier)\n\n # Phase 2: Addition of new instances\n register_class = {self.min_label: 'optimizable',\n self.maj_label: 'optimizable'}\n number_of_optimizations = {self.min_label: 0,\n self.maj_label: 0}\n accuracy_class = {self.min_label: 0, self.maj_label: 0}\n\n _logger.info(self.__class__.__name__ + \": \" + \"Starting optimization\")\n while (AUC < 1.0\n and (register_class[self.min_label] == 'optimizable'\n or register_class[self.maj_label] == 'optimizable')):\n less_accuracy = np.inf\n # loop in line 8\n for i in [self.min_label, self.maj_label]:\n # condition in line 9\n if register_class[i] == 'optimizable':\n y_mask = y[for_validation] == i\n class_for_validation = for_validation[y_mask]\n bp = self.base_classifier.get_params()\n base_classifier = self.base_classifier.__class__(**(bp))\n accuracy_class[i] = evaluate_class(GS,\n GS_y,\n X[class_for_validation],\n y[class_for_validation],\n base_classifier)\n if accuracy_class[i] < less_accuracy:\n less_accuracy = accuracy_class[i]\n target_class = i\n # conditional in line 17\n if (target_class == self.min_label\n and number_of_optimizations[target_class] > 0):\n # it is not clear where does GS_trial coming from in line 18\n GS = DE_optimization(GS,\n GS_y,\n X,\n y,\n min_indices,\n maj_indices,\n base_classifier,\n for_validation)\n else:\n if target_class == self.min_label:\n idx = self.random_state.choice(min_indices)\n else:\n idx = self.random_state.choice(maj_indices)\n\n GS_trial = np.vstack([GS, X[idx]])\n GS_trial_y = np.hstack([GS_y, y[idx]])\n # removing idx from the validation set in order to keep\n # the validation fair\n for_validation_trial = for_validation.tolist()\n if idx in for_validation:\n for_validation_trial.remove(idx)\n\n for_validation_trial = np.array(\n for_validation_trial).astype(int)\n # doing optimization\n GS_trial = DE_optimization(GS_trial,\n GS_trial_y,\n X,\n y,\n min_indices,\n maj_indices,\n base_classifier,\n for_validation)\n\n # line 23\n bp = self.base_classifier.get_params()\n base_classifier = self.base_classifier.__class__(**(bp))\n\n AUC_trial = evaluate_ID(GS_trial,\n GS_trial_y,\n X[for_validation],\n y[for_validation],\n base_classifier)\n # conditional in line 24\n if AUC_trial > AUC:\n AUC = AUC_trial\n GS = GS_trial\n GS_y = GS_trial_y\n for_validation = for_validation_trial\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Size of validation set %d\" % len(for_validation))\n if 
len(np.unique(y[for_validation])) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in validation set\")\n return X.copy(), y.copy()\n if len(np.unique(GS_y)) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in reduced dataset\")\n return X.copy(), y.copy()\n\n number_of_optimizations[target_class] = 0\n else:\n # conditional in line 29\n if (target_class == self.min_label\n and number_of_optimizations[target_class] < self.OT):\n number_of_optimizations[target_class] += 1\n else:\n register_class[target_class] = 'non-optimizable'\n\n return mms.inverse_transform(GS), GS_y\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'F': self.F,\n 'G': self.G,\n 'OT': self.OT,\n 'max_it': self.max_it,\n 'n_jobs': self.n_jobs,\n 'dt_classifier': self.dt_classifier,\n 'base_classifier': self.base_classifier,\n 'random_state': self._random_state_init}\n\n\nclass RWO_sampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{rwo_sampling,\n author = {Zhang, Huaxzhang and Li, Mingfang},\n year = {2014},\n month = {11},\n pages = {},\n title = {RWO-Sampling: A Random Walk Over-Sampling Approach\n to Imbalanced Data Classification},\n volume = {20},\n booktitle = {Information Fusion}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n stds = np.diag(np.std(X_min, axis=0)/np.sqrt(len(X_min)))\n\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n samples.append(self.random_state.multivariate_normal(X_min[idx],\n stds))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.array([self.min_label]*len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current 
sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NEATER(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{neater,\n author={Almogahed, B. A. and Kakadiaris, I. A.},\n booktitle={2014 22nd International Conference on\n Pattern Recognition},\n title={NEATER: Filtering of Over-sampled Data\n Using Non-cooperative Game Theory},\n year={2014},\n volume={},\n number={},\n pages={1371-1376},\n keywords={data handling;game theory;information\n filtering;NEATER;imbalanced data\n problem;synthetic data;filtering of\n over-sampled data using non-cooperative\n game theory;Games;Game theory;Vectors;\n Sociology;Statistics;Silicon;\n Mathematical model},\n doi={10.1109/ICPR.2014.245},\n ISSN={1051-4651},\n month={Aug}}\n\n Notes:\n * Evolving both majority and minority probabilities as nothing ensures\n that the probabilities remain in the range [0,1], and they need to\n be normalized.\n * The inversely weighted function needs to be cut at some value (like\n the alpha level), otherwise it will overemphasize the utility of\n having differing neighbors next to each other.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n smote_n_neighbors=5,\n b=5,\n alpha=0.1,\n h=20,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n smote_n_neighbors (int): number of neighbors in SMOTE sampling\n b (int): number of neighbors\n alpha (float): smoothing term\n h (int): number of iterations in evolution\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(smote_n_neighbors, \"smote_n_neighbors\", 1)\n self.check_greater_or_equal(b, \"b\", 1)\n self.check_greater_or_equal(alpha, \"alpha\", 0)\n self.check_greater_or_equal(h, \"h\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.smote_n_neighbors = smote_n_neighbors\n self.b = b\n self.alpha = alpha\n self.h = h\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'smote_n_neighbors': [3, 5, 7],\n 'b': [3, 5, 7],\n 'alpha': [0.1],\n 'h': [20]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Applying SMOTE and ADASYN\n X_0, y_0 = SMOTE(proportion=self.proportion,\n 
n_neighbors=self.smote_n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n X_1, y_1 = ADASYN(n_neighbors=self.b,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n X_new = np.vstack([X_0, X_1[len(X):]])\n y_new = np.hstack([y_0, y_1[len(y):]])\n\n X_syn = X_new[len(X):]\n\n if len(X_syn) == 0:\n return X.copy(), y.copy()\n\n X_all = X_new\n y_all = y_new\n\n # binary indicator indicating synthetic instances\n synthetic = np.hstack(\n [np.array([False]*len(X)), np.array([True]*len(X_syn))])\n\n # initializing strategy probabilities\n prob = np.zeros(shape=(len(X_all), 2))\n prob.fill(0.5)\n for i in range(len(X)):\n if y[i] == self.min_label:\n prob[i, 0], prob[i, 1] = 0.0, 1.0\n else:\n prob[i, 0], prob[i, 1] = 1.0, 0.0\n\n # Finding nearest neighbors, +1 as X_syn is part of X_all and nearest\n # neighbors will be themselves\n nn = NearestNeighbors(n_neighbors=self.b + 1, n_jobs=self.n_jobs)\n nn.fit(X_all)\n distances, indices = nn.kneighbors(X_syn)\n\n # computing distances\n dm = pairwise_distances(X_syn, X_all)\n dm[dm == 0] = 1e-8\n dm = 1.0/dm\n dm[dm > self.alpha] = self.alpha\n\n def wprob_mixed(prob, i):\n ind = indices[i][1:]\n term_0 = 1*prob[i][0]*prob[ind, 0]\n term_1 = dm[i, ind]*(prob[i][1]*prob[ind, 0] +\n prob[i][0]*prob[ind, 1])\n term_2 = 1*prob[i][1]*prob[ind, 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def wprob_min(prob, i):\n term_0 = 0*prob[indices[i][1:], 0]\n term_1 = dm[i, indices[i][1:]]*(1*prob[indices[i][1:], 0] +\n 0*prob[indices[i][1:], 1])\n term_2 = 1*prob[indices[i][1:], 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def wprob_maj(prob, i):\n term_0 = 1*prob[indices[i][1:], 0]\n term_1 = dm[i, indices[i][1:]]*(0*prob[indices[i][1:], 0] +\n 1*prob[indices[i][1:], 1])\n term_2 = 0*prob[indices[i][1:], 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def utilities(prob):\n \"\"\"\n Computes the utilit function\n\n Args:\n prob (np.matrix): strategy probabilities\n\n Returns:\n np.array, np.array, np.array: utility values, minority\n utilities, majority\n utilities\n \"\"\"\n\n domain = range(len(X_syn))\n util_mixed = np.array([wprob_mixed(prob, i) for i in domain])\n util_mixed = np.hstack([np.array([0]*len(X)), util_mixed])\n\n util_min = np.array([wprob_min(prob, i) for i in domain])\n util_min = np.hstack([np.array([0]*len(X)), util_min])\n\n util_maj = np.array([wprob_maj(prob, i) for i in domain])\n util_maj = np.hstack([np.array([0]*len(X)), util_maj])\n\n return util_mixed, util_min, util_maj\n\n def evolution(prob, synthetic, alpha=self.alpha):\n \"\"\"\n Executing one step of the probabilistic evolution\n\n Args:\n prob (np.matrix): strategy probabilities\n synthetic (np.array): flags of synthetic examples\n alpha (float): smoothing function\n\n Returns:\n np.matrix: updated probabilities\n \"\"\"\n util_mixed, util_min, util_maj = utilities(prob)\n\n prob_new = prob.copy()\n synthetic_values = prob[:, 1] * \\\n (alpha + util_min)/(alpha + util_mixed)\n prob_new[:, 1] = np.where(synthetic, synthetic_values, prob[:, 1])\n\n synthetic_values = prob[:, 0] * \\\n (alpha + util_maj)/(alpha + util_mixed)\n prob_new[:, 0] = np.where(synthetic, synthetic_values, prob[:, 0])\n\n norm_factor = np.sum(prob_new, axis=1)\n\n prob_new[:, 0] = prob_new[:, 0]/norm_factor\n prob_new[:, 1] = prob_new[:, 1]/norm_factor\n\n return prob_new\n\n # executing the evolution\n for _ in range(self.h):\n prob = evolution(prob, synthetic)\n\n # determining final labels\n y_all[len(X):] = np.argmax(prob[len(X):], 
axis=1)\n\n return X_all, y_all\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'smote_n_neighbors': self.smote_n_neighbors,\n 'b': self.b,\n 'alpha': self.alpha,\n 'h': self.h,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DEAGO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{deago,\n author={Bellinger, C. and Japkowicz, N. and\n Drummond, C.},\n booktitle={2015 IEEE 14th International\n Conference on Machine Learning\n and Applications (ICMLA)},\n title={Synthetic Oversampling for Advanced\n Radioactive Threat Detection},\n year={2015},\n volume={},\n number={},\n pages={948-953},\n keywords={radioactive waste;advanced radioactive\n threat detection;gamma-ray spectral\n classification;industrial nuclear\n facilities;Health Canadas national\n monitoring networks;Vancouver 2010;\n Isotopes;Training;Monitoring;\n Gamma-rays;Machine learning algorithms;\n Security;Neural networks;machine\n learning;classification;class\n imbalance;synthetic oversampling;\n artificial neural networks;\n autoencoders;gamma-ray spectra},\n doi={10.1109/ICMLA.2015.58},\n ISSN={},\n month={Dec}}\n\n Notes:\n * There is no hint on the activation functions and amounts of noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_density_estimation,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n e=100,\n h=0.3,\n sigma=0.1,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n e (int): number of epochs\n h (float): fraction of number of hidden units\n sigma (float): training noise\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(e, \"e\", 1)\n self.check_greater(h, \"h\", 0)\n self.check_greater(sigma, \"sigma\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.e = e\n self.h = h\n self.sigma = sigma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'e': [40],\n 'h': [0.1, 0.2, 0.3, 0.4, 0.5],\n 'sigma': [0.05, 0.1, 0.2]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # 
ugly hack to get reproducible results from keras with\n # tensorflow backend\n if isinstance(self._random_state_init, int):\n import os\n os.environ['PYTHONHASHSEED'] = str(self._random_state_init)\n import keras as K\n np.random.seed(self._random_state_init)\n import random\n random.seed(self._random_state_init)\n # from tensorflow import set_random_seed\n import tensorflow\n try:\n tensorflow.set_random_seed(self._random_state_init)\n except Exception as e:\n tensorflow.random.set_seed(self._random_state_init)\n else:\n seed = 127\n import os\n os.environ['PYTHONHASHSEED'] = str(seed)\n import keras as K\n np.random.seed(seed)\n import random\n random.seed(seed)\n # from tensorflow import set_random_seed\n import tensorflow\n try:\n tensorflow.compat.v1.set_random_seed(seed)\n except Exception as e:\n tensorflow.random.set_seed(self._random_state_init)\n\n from keras import backend as K\n import tensorflow as tf\n try:\n session_conf = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.compat.v1.Session(\n graph=tf.compat.v1.get_default_graph(), config=session_conf)\n K.set_session(sess)\n except Exception as e:\n session_conf = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.compat.v1.Session(\n graph=tf.compat.v1.get_default_graph(), config=session_conf)\n tf.compat.v1.keras.backend.set_session(sess)\n\n if not hasattr(self, 'Input'):\n from keras.layers import Input, Dense, GaussianNoise\n from keras.models import Model\n from tensorflow.keras.callbacks import EarlyStopping\n\n self.Input = Input\n self.Dense = Dense\n self.GaussianNoise = GaussianNoise\n self.Model = Model\n self.EarlyStopping = EarlyStopping\n\n # sampling by smote\n X_samp, y_samp = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # samples to map to the manifold extracted by the autoencoder\n X_init = X_samp[len(X):]\n\n if len(X_init) == 0:\n return X.copy(), y.copy()\n\n # normalizing\n X_min = X[y == self.min_label]\n ss = StandardScaler()\n X_min_normalized = ss.fit_transform(X_min)\n X_init_normalized = ss.transform(X_init)\n\n # extracting dimensions\n d = len(X[0])\n encoding_d = max([2, int(np.rint(d*self.h))])\n\n message = \"Input dimension: %d, encoding dimension: %d\"\n message = message % (d, encoding_d)\n _logger.info(self.__class__.__name__ + \": \" + message\n )\n\n # constructing the autoencoder\n callbacks = [self.EarlyStopping(monitor='val_loss', patience=2)]\n\n input_layer = self.Input(shape=(d,))\n noise = self.GaussianNoise(self.sigma)(input_layer)\n encoded = self.Dense(encoding_d, activation='relu')(noise)\n decoded = self.Dense(d, activation='linear')(encoded)\n\n dae = self.Model(input_layer, decoded)\n dae.compile(optimizer='adadelta', loss='mean_squared_error')\n actual_epochs = max([self.e, int(5000.0/len(X_min))])\n\n if len(X_min) > 10:\n val_perc = 0.2\n val_num = int(val_perc*len(X_min))\n X_min_train = X_min_normalized[:-val_num]\n X_min_val = X_min_normalized[-val_num:]\n\n dae.fit(X_min_train,\n X_min_train,\n epochs=actual_epochs,\n validation_data=(X_min_val, X_min_val),\n callbacks=callbacks,\n verbose=0)\n else:\n dae.fit(X_min_normalized, X_min_normalized,\n epochs=actual_epochs, verbose=0)\n\n # mapping the initial samples to the manifold\n samples = ss.inverse_transform(dae.predict(X_init_normalized))\n\n return (np.vstack([X, samples]),\n np.hstack([y, 
np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'e': self.e,\n 'h': self.h,\n 'sigma': self.sigma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Gazzah(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{gazzah,\n author={Gazzah, S. and Hechkel, A. and Essoukri\n Ben Amara, N. },\n booktitle={2015 IEEE 12th International\n Multi-Conference on Systems,\n Signals Devices (SSD15)},\n title={A hybrid sampling method for\n imbalanced data},\n year={2015},\n volume={},\n number={},\n pages={1-6},\n keywords={computer vision;image classification;\n learning (artificial intelligence);\n sampling methods;hybrid sampling\n method;imbalanced data;\n diversification;computer vision\n domain;classical machine learning\n systems;intraclass variations;\n system performances;classification\n accuracy;imbalanced training data;\n training data set;over-sampling;\n minority class;SMOTE star topology;\n feature vector deletion;intra-class\n variations;distribution criterion;\n biometric data;true positive rate;\n Training data;Principal component\n analysis;Databases;Support vector\n machines;Training;Feature extraction;\n Correlation;Imbalanced data sets;\n Intra-class variations;Data analysis;\n Principal component analysis;\n One-against-all SVM},\n doi={10.1109/SSD.2015.7348093},\n ISSN={},\n month={March}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_components=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_components (int): number of components in PCA analysis\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_components, \"n_components\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [2, 3, 4, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # do the oversampling\n pf_smote = polynom_fit_SMOTE(proportion=self.proportion,\n random_state=self.random_state)\n X_samp, y_samp = pf_smote.sample(X, y)\n X_min_samp = X_samp[len(X):]\n\n if len(X_min_samp) == 0:\n return X.copy(), y.copy()\n\n # do the undersampling\n X_maj = X[y == self.maj_label]\n\n # fitting the PCA model\n pca = PCA(n_components=min([len(X[0]), self.n_components]))\n X_maj_trans = pca.fit_transform(X_maj)\n R = np.sqrt(np.sum(np.var(X_maj_trans, axis=0)))\n # determining the majority samples to remove\n to_remove = np.where([np.linalg.norm(x) > R for x in X_maj_trans])[0]\n _logger.info(self.__class__.__name__ + \": \" +\n \"Removing %d majority samples\" % len(to_remove))\n # removing the majority samples\n X_maj = np.delete(X_maj, to_remove, axis=0)\n\n if len(X_min_samp) == 0:\n _logger.info(\"no samples added\")\n return X.copy(), y.copy()\n\n return (np.vstack([X_maj, X_min_samp]),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min_samp))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MCT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{mct,\n author = {Jiang, Liangxiao and Qiu, Chen and Li, Chaoqun},\n year = {2015},\n month = {03},\n pages = {1551004},\n title = {A Novel Minority Cloning Technique for\n Cost-Sensitive Learning},\n volume = {29},\n booktitle = {International Journal of Pattern Recognition\n and Artificial Intelligence}\n }\n\n Notes:\n * Mode is changed to median, distance is changed to Euclidean to\n support continuous features, and normalized.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_copy]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # having continuous variables, the mode is replaced by median\n x_med = np.median(X_min, axis=0)\n distances = np.array([np.linalg.norm(x_med - x) for x in X_min])\n sums = np.sum(distances)\n if sums != 0:\n distances = distances/sums\n\n # distribution of copies is determined (Euclidean distance is a\n # dissimilarity measure which is changed to similarity by subtracting\n # from 1.0)\n distribution = (1.0 - distances)/(np.sum(1.0 - distances))\n\n if any(np.isnan(distribution)):\n _logger.warning(self.__class__.__name__ + \": \" +\n \"NaN in the probability distribution\")\n return X.copy(), y.copy()\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n samples.append(X_min[self.random_state.choice(\n np.arange(len(X_min)), p=distribution)])\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ADG(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{adg,\n author = {Pourhabib, A. and Mallick, Bani K. 
and Ding, Yu},\n year = {2015},\n month = {16},\n pages = {2695--2724},\n title = {A Novel Minority Cloning Technique for\n Cost-Sensitive Learning},\n volume = {16},\n journal = {Journal of Machine Learning Research}\n }\n\n Notes:\n * This method has a lot of parameters, it becomes fairly hard to\n cross-validate thoroughly.\n * Fails if matrix is singular when computing alpha_star, fixed\n by PCA.\n * Singularity might be caused by repeating samples.\n * Maintaining the kernel matrix becomes unfeasible above a couple\n of thousand vectors.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n kernel='inner',\n lam=1.0,\n mu=1.0,\n k=12,\n gamma=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n kernel (str): 'inner'/'rbf_x', where x is a float, the bandwidth\n lam (float): lambda parameter of the method\n mu (float): mu parameter of the method\n k (int): number of samples to generate in each iteration\n gamma (float): gamma parameter of the method\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n\n if kernel != 'inner' and not kernel.startswith('rbf'):\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Kernel function %s not supported' % kernel)\n elif kernel.startswith('rbf'):\n par = float(kernel.split('_')[-1])\n if par <= 0.0:\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Kernel parameter %f is not supported' % par)\n\n self.check_greater(lam, 'lam', 0)\n self.check_greater(mu, 'mu', 0)\n self.check_greater_or_equal(k, 'k', 1)\n self.check_greater(gamma, 'gamma', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.kernel = kernel\n self.lam = lam\n self.mu = mu\n self.k = k\n self.gamma = gamma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'kernel': ['inner', 'rbf_0.5',\n 'rbf_1.0', 'rbf_2.0'],\n 'lam': [1.0, 2.0],\n 'mu': [1.0, 2.0],\n 'k': [12],\n 'gamma': [1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n def bic_score(kmeans, X):\n \"\"\"\n Compute BIC score for clustering\n\n 
Args:\n kmeans (sklearn.KMeans): kmeans object\n X (np.matrix): clustered data\n\n Returns:\n float: bic value\n\n Inspired by https://stats.stackexchange.com/questions/90769/using-bic-to-estimate-the-number-of-k-in-kmeans\n \"\"\" # noqa\n # extract descriptors of the clustering\n cluster_centers = kmeans.cluster_centers_\n cluster_labels = kmeans.labels_\n n_clusters = kmeans.n_clusters\n n_in_clusters = np.bincount(cluster_labels)\n N, d = X.shape\n\n # compute variance for all clusters beforehand\n\n def sum_norm_2(i):\n return np.sum(np.linalg.norm(X[cluster_labels == i] -\n cluster_centers[i])**2)\n\n cluster_variances = [sum_norm_2(i) for i in range(n_clusters)]\n term_0 = (1.0)/((N - n_clusters) * d)\n term_1 = np.sum(cluster_variances)\n clustering_variance = term_0 * term_1\n\n const_term = 0.5 * n_clusters * np.log(N) * (d+1)\n\n def bic_comp(i):\n term_0 = n_in_clusters[i] * np.log(n_in_clusters[i])\n term_1 = n_in_clusters[i] * np.log(N)\n term_2 = (((n_in_clusters[i] * d) / 2)\n * np.log(2*np.pi*clustering_variance))\n term_3 = ((n_in_clusters[i] - 1) * d / 2)\n\n return term_0 - term_1 - term_2 - term_3\n\n bic = np.sum([bic_comp(i) for i in range(n_clusters)]) - const_term\n\n return bic\n\n def xmeans(X, r=(1, 10)):\n \"\"\"\n Clustering with BIC based n_cluster selection\n\n Args:\n X (np.matrix): data to cluster\n r (tuple): lower and upper bound on the number of clusters\n\n Returns:\n sklearn.KMeans: clustering with lowest BIC score\n \"\"\"\n best_bic = np.inf\n best_clustering = None\n\n # do clustering for all n_clusters in the specified range\n for k in range(r[0], min([r[1], len(X)])):\n kmeans = KMeans(n_clusters=k,\n random_state=self.random_state).fit(X)\n\n bic = bic_score(kmeans, X)\n if bic < best_bic:\n best_bic = bic\n best_clustering = kmeans\n\n return best_clustering\n\n def xgmeans(X, r=(1, 10)):\n \"\"\"\n Gaussian mixture with BIC to select the optimal number\n of components\n\n Args:\n X (np.matrix): data to cluster\n r (tuple): lower and upper bound on the number of components\n\n Returns:\n sklearn.GaussianMixture: Gaussian mixture model with the\n lowest BIC score\n \"\"\"\n best_bic = np.inf\n best_mixture = None\n\n # do model fitting for all n_components in the specified range\n for k in range(r[0], min([r[1], len(X)])):\n gmm = GaussianMixture(\n n_components=k, random_state=self.random_state).fit(X)\n bic = gmm.bic(X)\n if bic < best_bic:\n best_bic = bic\n best_mixture = gmm\n\n return best_mixture\n\n def evaluate_matrices(X, y, kernel=np.inner):\n \"\"\"\n The function evaluates the matrices specified in the method.\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n kernel (function): the kernel function to be used\n\n Returns:\n np.matrix, np.matrix, int, int, np.matrix, np.array,\n np.matrix, np.matrix, np.matrix\n np.array, np.matrix, np.matrix, np.matrix, np.matrix:\n X_minux, X_plus, l_minus, l_plus, X, y, K, M_plus, M_minus,\n M, K_plus, K_minus, N_plus, n_minus using the notations of\n the paper, X and y are ordered by target labels\n \"\"\"\n X_minus = X[y == self.maj_label]\n X_plus = X[y == self.min_label]\n l_minus = len(X_minus)\n l_plus = len(X_plus)\n\n X = np.vstack([X_minus, X_plus])\n y = np.hstack([np.array([self.maj_label]*l_minus),\n np.array([self.min_label]*l_plus)])\n\n K = pairwise_distances(X, X, metric=kernel)\n M_plus = np.mean(K[:, len(X_minus):], axis=1)\n M_minus = np.mean(K[:, :len(X_minus)], axis=1)\n M = np.dot(M_minus - M_plus, M_minus - M_plus)\n\n K_minus = K[:, :len(X_minus)]\n K_plus 
= K[:, len(X_minus):]\n\n return (X_minus, X_plus, l_minus, l_plus, X, y, K,\n M_plus, M_minus, M, K_plus, K_minus)\n\n # Implementation of the technique, following the steps and notations\n # of the paper\n q = n_to_sample\n\n # instantiating the proper kernel function, the parameter of the RBF\n # is supposed to be the denominator in the Gaussian\n if self.kernel == 'inner':\n kernel_function = np.inner\n else:\n kf = self.kernel.split('_')\n if kf[0] == 'rbf':\n d = float(kf[1])\n def kernel_function(\n x, y): return np.exp(-np.linalg.norm(x - y)**2/d)\n\n # Initial evaluation of the matrices\n (X_minus, X_plus, l_minus, l_plus, X, y, K, M_plus, M_minus,\n M, K_plus, K_minus) = evaluate_matrices(X,\n y,\n kernel=kernel_function)\n # The computing of N matrix is factored into two steps, computing\n # N_plus and N_minus this is used to improve efficiency\n K_plus2 = np.dot(K_plus, K_plus.T)\n K_plus_sum = np.sum(K_plus, axis=1)\n K_plus_diad = np.outer(K_plus_sum, K_plus_sum)/l_plus\n\n K_minus2 = np.dot(K_minus, K_minus.T)\n K_minus_sum = np.sum(K_minus, axis=1)\n K_minus_diad = np.outer(K_minus_sum, K_minus_sum)/l_minus\n\n N = K_plus2 - K_plus_diad + K_minus2 - K_minus_diad\n\n X_plus_hat = X_plus.copy()\n l_minus = len(X_minus)\n\n early_stop = False\n total_added = 0\n # executing the sample generation\n while q > 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"Starting iteration with q=%d\" % q)\n # step 1\n clusters = xmeans(X_plus_hat)\n l_c = np.array([np.sum(clusters.labels_ == i)\n for i in range(clusters.n_clusters)])\n\n # step 2\n k_c = ((1.0/l_c)/(np.sum(1.0/l_c))*self.k).astype(int)\n k_c[k_c == 0] = 1\n lam_c, mu_c = self.lam/l_c, self.mu/l_c\n\n # step 3\n omega = - np.sum([k_c[i]*(lam_c[i])**2/(4*mu_c[i]**2)\n for i in range(len(k_c))])\n nu_c = - 0.5*k_c*lam_c\n M_plus_c = [np.mean(K[:, np.arange(len(X_minus), len(X))[\n clusters.labels_ == i]]) for i in range(len(k_c))]\n\n # step 4\n A = (M - self.gamma*N) - omega*K\n b = np.sum([(M_minus - M_plus_c[i])*nu_c[i]\n for i in range(len(k_c))], axis=0)\n try:\n alpha_star = np.linalg.solve(A, b)\n except Exception as e:\n # handling the issue of singular matrix\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Singular matrix\")\n # deleting huge data structures\n if q == n_to_sample:\n if len(X[0]) == 1:\n return None, None\n K, K_plus, K_minus = None, None, None\n n_components = int(np.sqrt(len(X[0])))\n pca = PCA(n_components=n_components).fit(X)\n\n message = \"reducing dimensionality to %d\" % n_components\n _logger.warning(self.__class__.__name__ + \": \" + message)\n X_trans = pca.transform(X)\n adg = ADG(proportion=self.proportion,\n kernel=self.kernel,\n lam=self.lam,\n mu=self.mu,\n k=self.k,\n gamma=self.gamma,\n random_state=self.random_state)\n X_samp, y_samp = adg.sample(X_trans, y)\n if X_samp is not None:\n return pca.inverse_transform(X_samp), y_samp\n else:\n return X.copy(), y.copy()\n else:\n q = int(q/2)\n continue\n\n # step 5\n mixture = xgmeans(X_plus)\n\n # step 6\n try:\n Z = mixture.sample(q)[0]\n except Exception as e:\n message = \"sampling error in sklearn.mixture.GaussianMixture\"\n _logger.warning(\n self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n # step 7\n # computing the kernel matrix of generated samples with all samples\n K_10 = pairwise_distances(Z, X, metric=kernel_function)\n mask_inner_prod = np.where(np.inner(K_10, alpha_star) > 0)[0]\n Z_hat = Z[mask_inner_prod]\n\n if len(Z_hat) == 0:\n q = int(q/2)\n continue\n\n 
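# Z_hat now contains only those generated points whose inner product with alpha_star is positive, i.e. the candidates kept as minority samples\n            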
_logger.info(self.__class__.__name__ + \": \" +\n \"number of vectors added: %d/%d\" % (len(Z_hat), q))\n\n # step 8\n # this step is not used for anything, the identified clusters are\n # only used in step 13 of the paper, however, the values set\n # (M_plus^c) are overwritten in step 3 of the next iteration\n\n # step 9\n X_plus_hat = np.vstack([X_plus_hat, Z_hat])\n l_plus = len(X_plus_hat)\n\n # step 11 - 16\n # these steps have been reorganized a bit for efficient\n # calculations\n\n pairwd = pairwise_distances(Z_hat, Z_hat, metric=kernel_function)\n K = np.block([[K, K_10[mask_inner_prod].T],\n [K_10[mask_inner_prod], pairwd]])\n\n K_minus = K[:, :l_minus]\n K_plus = K[:, l_minus:]\n\n # step 10\n X = np.vstack([X_minus, X_plus_hat])\n y = np.hstack([y, np.repeat(self.min_label, len(Z_hat))])\n\n if early_stop is True:\n break\n\n M_plus = np.mean(K_plus, axis=1)\n M_minus = np.mean(K_minus, axis=1)\n\n # step 13 is already involved in the core of the loop\n M = np.dot(M_minus - M_plus, M_minus - M_plus)\n\n l_new = len(Z_hat)\n total_added = total_added + l_new\n\n K_minus2_01 = np.dot(K_minus[:-l_new:], K_minus[-l_new:].T)\n K_minus2 = np.block([[K_minus2, K_minus2_01],\n [K_minus2_01.T, np.dot(K_minus[-l_new:],\n K_minus[-l_new:].T)]])\n K_minus_sum = M_minus*len(K_minus)\n\n K_plus2 = K_plus2 + np.dot(K_plus[:-l_new, l_new:],\n K_plus[:-l_new, l_new:].T)\n\n K_plus2_01 = np.dot(K_plus[:-l_new], K_plus[-l_new:].T)\n\n K_plus2 = np.block([[K_plus2, K_plus2_01],\n [K_plus2_01.T, np.dot(K_plus[-l_new:],\n K_plus[-l_new:].T)]])\n\n K_plus_sum = M_plus*len(K_plus)\n\n N = K_plus2 - np.outer(K_plus_sum/l_plus, K_plus_sum) + \\\n K_minus2 - np.outer(K_minus_sum/l_minus, K_minus_sum)\n\n # step 17\n if l_new/total_added < 0.01:\n early_stop = True\n else:\n q = int(q/2)\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'kernel': self.kernel,\n 'lam': self.lam,\n 'mu': self.mu,\n 'k': self.k,\n 'gamma': self.gamma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_IPF(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_ipf,\n title = \"SMOTE–IPF: Addressing the noisy and borderline\n examples problem in imbalanced\n classification by a re-sampling method\n with filtering\",\n journal = \"Information Sciences\",\n volume = \"291\",\n pages = \"184 - 203\",\n year = \"2015\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2014.08.051\",\n author = \"José A. Sáez and Julián Luengo and Jerzy\n Stefanowski and Francisco Herrera\",\n keywords = \"Imbalanced classification,\n Borderline examples,\n Noisy data,\n Noise filters,\n SMOTE\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_folds=9,\n k=3,\n p=0.01,\n voting='majority',\n classifier=DecisionTreeClassifier(random_state=2),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in SMOTE sampling\n n_folds (int): the number of partitions\n k (int): used in stopping condition\n p (float): percentage value ([0,1]) used in stopping condition\n voting (str): 'majority'/'consensus'\n classifier (obj): classifier object\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_folds, \"n_folds\", 2)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_greater_or_equal(p, \"p\", 0)\n self.check_isin(voting, \"voting\", ['majority', 'consensus'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_folds = n_folds\n self.k = k\n self.p = p\n self.voting = voting\n self.classifier = classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_folds': [9],\n 'k': [3],\n 'p': [0.01],\n 'voting': ['majority', 'consensus'],\n 'classifier': classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # do SMOTE sampling\n X_samp, y_samp = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n n_folds = min([self.n_folds, np.sum(y == self.min_label)])\n\n condition = 0\n while True:\n # validating the sampled dataset\n validator = StratifiedKFold(n_folds)\n predictions = []\n for train_index, _ in validator.split(X_samp, y_samp):\n self.classifier.fit(X_samp[train_index], y_samp[train_index])\n predictions.append(self.classifier.predict(X_samp))\n\n # do decision based on one of the voting schemes\n if self.voting == 'majority':\n pred_votes = (np.mean(predictions, axis=0) > 0.5).astype(int)\n to_remove = np.where(np.not_equal(pred_votes, y_samp))[0]\n elif self.voting == 'consensus':\n pred_votes = (np.mean(predictions, axis=0) > 0.5).astype(int)\n sum_votes = np.sum(predictions, axis=0)\n to_remove = np.where(np.logical_and(np.not_equal(\n pred_votes, y_samp), np.equal(sum_votes, self.n_folds)))[0]\n else:\n message = 'Voting scheme %s is not implemented' % self.voting\n raise ValueError(self.__class__.__name__ + \": \" + message)\n\n # delete samples incorrectly classified\n _logger.info(self.__class__.__name__ + \": \" +\n 'Removing %d elements' % len(to_remove))\n X_samp = np.delete(X_samp, to_remove, axis=0)\n y_samp = np.delete(y_samp, to_remove)\n\n # if the number 
of samples removed becomes small or k iterations\n # were done quit\n if len(to_remove) < len(X_samp)*self.p:\n condition = condition + 1\n else:\n condition = 0\n if condition >= self.k:\n break\n\n return X_samp, y_samp\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_folds': self.n_folds,\n 'k': self.k,\n 'p': self.p,\n 'voting': self.voting,\n 'n_jobs': self.n_jobs,\n 'classifier': self.classifier,\n 'random_state': self._random_state_init}\n\n\nclass KernelADASYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{kernel_adasyn,\n author={Tang, B. and He, H.},\n booktitle={2015 IEEE Congress on Evolutionary\n Computation (CEC)},\n title={KernelADASYN: Kernel based adaptive\n synthetic data generation for\n imbalanced learning},\n year={2015},\n volume={},\n number={},\n pages={664-671},\n keywords={learning (artificial intelligence);\n pattern classification;\n sampling methods;KernelADASYN;\n kernel based adaptive synthetic\n data generation;imbalanced\n learning;standard classification\n algorithms;data distribution;\n minority class decision rule;\n expensive minority class data\n misclassification;kernel based\n adaptive synthetic over-sampling\n approach;imbalanced data\n classification problems;kernel\n density estimation methods;Kernel;\n Estimation;Accuracy;Measurement;\n Standards;Training data;Sampling\n methods;Imbalanced learning;\n adaptive over-sampling;kernel\n density estimation;pattern\n recognition;medical and\n healthcare data learning},\n doi={10.1109/CEC.2015.7256954},\n ISSN={1089-778X},\n month={May}}\n\n Notes:\n * The method of sampling was not specified, Markov Chain Monte Carlo\n has been implemented.\n * Not prepared for improperly conditioned covariance matrix.\n \"\"\"\n\n categories = [OverSampling.cat_density_estimation,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n k=5,\n h=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n k (int): number of neighbors in the nearest neighbors component\n h (float): kernel bandwidth\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(k, 'k', 1)\n self.check_greater(h, 'h', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k = k\n self.h = h\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k': [5, 7, 9],\n 'h': [0.01, 0.02, 0.05, 0.1, 0.2,\n 0.5, 1.0, 2.0, 10.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the nearest neighbors model\n nn = NearestNeighbors(n_neighbors=min([len(X_min), self.k+1]),\n n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # computing majority score\n r = np.array([np.sum(y[indices[i][1:]] == self.maj_label)\n for i in range(len(X_min))])\n\n if np.sum(r > 0) < 2:\n message = (\"majority score is 0 for all or all but one \"\n \"minority samples\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n r = r/np.sum(r)\n\n # kernel density function\n def p_x(x):\n \"\"\"\n Returns minority density value at x\n\n Args:\n x (np.array): feature vector\n\n Returns:\n float: density value\n \"\"\"\n result = 1.0/(len(X_min)*self.h)\n result = result*(1.0/(np.sqrt(2*np.pi)*self.h)**len(X[0]))\n\n exp_term = np.exp(-0.5*np.linalg.norm(x - X_min, axis=1)**2/self.h)\n return result*np.inner(r, exp_term)\n\n samples = []\n it = 0\n\n # parameters of the Monte Carlo sampling\n burn_in = 1000\n periods = 50\n\n # covariance is used to generate a random sample in the neighborhood\n covariance = np.cov(X_min[r > 0], rowvar=False)\n\n if len(covariance) > 1 and np.linalg.cond(covariance) > 10000:\n message = (\"reducing dimensions due to inproperly conditioned\"\n \"covariance matrix\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if len(X[0]) <= 2:\n _logger.info(self.__class__.__name__ +\n \": \" + \"matrix ill-conditioned\")\n return X.copy(), y.copy()\n\n n_components = int(np.rint(len(covariance)/2))\n\n pca = PCA(n_components=n_components)\n X_trans = pca.fit_transform(X)\n\n ka = KernelADASYN(proportion=self.proportion,\n k=self.k,\n h=self.h,\n random_state=self.random_state)\n\n X_samp, y_samp = ka.sample(X_trans, y)\n 
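# transform the oversampled data back from the reduced PCA space to the original feature space\n            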
return pca.inverse_transform(X_samp), y_samp\n\n # starting Markov-Chain Monte Carlo for sampling\n x_old = X_min[self.random_state.choice(np.where(r > 0)[0])]\n p_old = p_x(x_old)\n\n # Cholesky decomposition\n L = np.linalg.cholesky(covariance)\n\n while len(samples) < n_to_sample:\n x_new = x_old + \\\n np.dot(self.random_state.normal(size=len(x_old)), L)\n p_new = p_x(x_new)\n\n alpha = p_new/p_old\n u = self.random_state.random_sample()\n if u < alpha:\n x_old = x_new\n p_old = p_new\n else:\n pass\n\n it = it + 1\n if it % periods == 0 and it > burn_in:\n samples.append(x_old)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'h': self.h,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MOT2LD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{mot2ld,\n author=\"Xie, Zhipeng\n and Jiang, Liyang\n and Ye, Tengju\n and Li, Xiaoli\",\n editor=\"Renz, Matthias\n and Shahabi, Cyrus\n and Zhou, Xiaofang\n and Cheema, Muhammad Aamir\",\n title=\"A Synthetic Minority Oversampling Method\n Based on Local Densities in Low-Dimensional\n Space for Imbalanced Learning\",\n booktitle=\"Database Systems for Advanced\n Applications\",\n year=\"2015\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"3--18\",\n isbn=\"978-3-319-18123-3\"\n }\n\n Notes:\n * Clusters might contain 1 elements, and all points can be filtered\n as noise.\n * Clusters might contain 0 elements as well, if all points are filtered\n as noise.\n * The entire clustering can become empty.\n * TSNE is very slow when the number of instances is over a couple\n of 1000\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_components=2,\n k=5,\n d_cut='auto',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_components (int): number of components for stochastic\n neighborhood embedding\n k (int): number of neighbors in the nearest neighbor component\n d_cut (float/str): distance cut value/'auto' for automated\n selection\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_components, 'n_component', 1)\n self.check_greater_or_equal(k, 'k', 1)\n if isinstance(d_cut, float) or isinstance(d_cut, int):\n if d_cut <= 0:\n raise ValueError(self.__class__.__name__ +\n \": \" + 'Non-positive d_cut is not allowed')\n elif d_cut != 'auto':\n raise ValueError(self.__class__.__name__ + \": \" +\n 'd_cut value %s not implemented' % d_cut)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.k = k\n self.d_cut = d_cut\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [2],\n 'k': [3, 5, 7],\n 'd_cut': ['auto']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n _logger.info(self.__class__.__name__ + \": \" +\n (\"starting TSNE n: %d d: %d\" % (len(X), len(X[0]))))\n # do the stochastic embedding\n X_tsne = TSNE(self.n_components,\n random_state=self.random_state,\n perplexity=10,\n n_iter_without_progress=100,\n n_iter=500,\n verbose=3).fit_transform(X)\n X_min = X_tsne[y == self.min_label]\n _logger.info(self.__class__.__name__ + \": \" + \"TSNE finished\")\n\n # fitting nearest neighbors model for all training data\n n_neighbors = min([len(X_min), self.k + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_tsne)\n distances, indices = nn.kneighbors(X_min)\n\n if isinstance(self.d_cut, float):\n d_cut = self.d_cut\n elif self.d_cut == 'auto':\n d_cut = np.max(distances[:, 1])\n\n # fitting nearest neighbors model to the minority data\n nn_min = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n distances_min, indices_min = nn_min.kneighbors(X_min)\n\n def n_rad_neighbors(x):\n x = x.reshape(1, -1)\n return len(nn.radius_neighbors(x, d_cut, return_distance=False)[0])\n\n # extracting the number of neighbors in a given radius\n rho = np.array([n_rad_neighbors(x) for x in X_min])\n closest_highest = []\n delta = 
[]\n\n # implementation of the density peak clustering algorithm\n # based on http://science.sciencemag.org/content/344/6191/1492.full\n for i in range(len(rho)):\n closest_neighbors = indices_min[i]\n closest_densities = rho[closest_neighbors]\n closest_highs = np.where(closest_densities > rho[i])[0]\n\n if len(closest_highs) > 0:\n closest_highest.append(closest_highs[0])\n delta.append(distances_min[i][closest_highs[0]])\n else:\n closest_highest.append(-1)\n delta.append(np.max(distances_min))\n\n to_sort = zip(rho, delta, np.arange(len(rho)))\n r, d, idx = zip(*sorted(to_sort, key=lambda x: x[0]))\n r, d, idx = np.array(r), np.array(d), np.array(idx)\n\n if len(d) < 3:\n return X.copy(), y.copy()\n\n widths = np.arange(1, int(len(r)/2))\n peak_indices = np.array(ssignal.find_peaks_cwt(d, widths=widths))\n\n if len(peak_indices) == 0:\n _logger.info(self.__class__.__name__ + \": \" + \"no peaks found\")\n return X.copy(), y.copy()\n\n cluster_center_indices = idx[peak_indices]\n cluster_centers = X_min[cluster_center_indices]\n\n # finding closest cluster center to minority points and deriving\n # cluster labels\n nn_cluster = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn_cluster.fit(cluster_centers)\n dist_cluster, ind_cluster = nn_cluster.kneighbors(X_min)\n cluster_labels = ind_cluster[:, 0]\n\n # computing local minority counts and determining noisy samples\n def n_min_y(i):\n return np.sum(y[indices[i][1:]] == self.min_label)\n\n local_minority_count = np.array(\n [n_min_y(i) for i in range(len(X_min))])\n\n noise = np.where(np.logical_or(rho == 1, local_minority_count == 0))[0]\n\n # determining importance scores\n importance = local_minority_count/rho\n prob = importance\n prob[noise] = 0.0\n prob = prob/np.sum(prob)\n\n # extracting cluster indices\n cluster_indices = [np.where(cluster_labels == i)[0]\n for i in range(np.max(cluster_labels) + 1)]\n # removing noise from clusters\n cluster_indices = [list(set(c).difference(set(noise)))\n for c in cluster_indices]\n\n # checking if clustering is empty\n empty_clustering = True\n for i in range(len(cluster_indices)):\n if len(cluster_indices[i]) > 0:\n empty_clustering = False\n\n if empty_clustering:\n _logger.info(self.__class__.__name__ + \": \" + \"Empty clustering\")\n return X.copy(), y.copy()\n\n cluster_sizes = np.array([len(c) for c in cluster_indices])\n cluster_indices_size_0 = np.where(cluster_sizes == 0)[0]\n for i in range(len(prob)):\n if cluster_labels[i] in cluster_indices_size_0:\n prob[i] = 0.0\n prob = prob/np.sum(prob)\n\n # carrying out the sampling\n X_min = X[y == self.min_label]\n samples = []\n while len(samples) < n_to_sample:\n # random sample according to the distribution computed\n random_idx = self.random_state.choice(np.arange(len(X_min)),\n p=prob)\n\n # cluster label of the random minority sample\n cluster_label = cluster_labels[random_idx]\n if cluster_label == -1:\n continue\n\n if len(cluster_indices[cluster_label]) == 0:\n continue\n elif len(cluster_indices[cluster_label]) == 1:\n # if the cluster has only 1 elements, it is repeated\n samples.append(X_min[random_idx])\n continue\n else:\n # otherwise a random cluster index is selected for sample\n # generation\n clus = cluster_indices[cluster_label]\n random_neigh_in_clus_idx = self.random_state.choice(clus)\n while random_idx == random_neigh_in_clus_idx:\n random_neigh_in_clus_idx = self.random_state.choice(clus)\n\n X_rand = X_min[random_idx]\n X_in_clus = X_min[random_neigh_in_clus_idx]\n 
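# generate the synthetic sample by interpolating between the selected minority point and its in-cluster neighbour\n                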
samples.append(self.sample_between_points(X_rand, X_in_clus))\n\n return (np.vstack([np.delete(X, noise, axis=0), np.vstack(samples)]),\n np.hstack([np.delete(y, noise),\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'k': self.k,\n 'd_cut': self.d_cut,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass V_SYNTH(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{v_synth,\n author = {Young,Ii, William A. and Nykl, Scott L. and\n Weckman, Gary R. and Chelberg, David M.},\n title = {Using Voronoi Diagrams to Improve\n Classification Performances when Modeling\n Imbalanced Datasets},\n journal = {Neural Comput. Appl.},\n issue_date = {July 2015},\n volume = {26},\n number = {5},\n month = jul,\n year = {2015},\n issn = {0941-0643},\n pages = {1041--1054},\n numpages = {14},\n url = {http://dx.doi.org/10.1007/s00521-014-1780-0},\n doi = {10.1007/s00521-014-1780-0},\n acmid = {2790665},\n publisher = {Springer-Verlag},\n address = {London, UK, UK},\n keywords = {Data engineering, Data mining, Imbalanced\n datasets, Knowledge extraction,\n Numerical algorithms, Synthetic\n over-sampling},\n }\n\n Notes:\n * The proposed encompassing bounding box generation is incorrect.\n * Voronoi diagram generation in high dimensional spaces is instable\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_components=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_components (int): number of components for PCA\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_components, \"n_component\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [3]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # creating the bounding box\n mins = np.min(X, axis=0)\n maxs = np.max(X, axis=0)\n mins = mins - 0.1*np.abs(mins)\n maxs = maxs + 0.1*np.abs(maxs)\n\n dim = len(X[0])\n\n def random_min_maxs():\n return np.where(self.random_state.randint(0, 1, size=dim) == 0,\n mins,\n maxs)\n\n n_bounding_box = min([100, len(X[0])])\n bounding_box = [random_min_maxs() for i in range(n_bounding_box)]\n X_bb = np.vstack([X, bounding_box])\n\n # applying PCA to reduce the dimensionality of the data\n n_components = min([len(X[0]), self.n_components])\n pca = PCA(n_components=n_components)\n X_pca = pca.fit_transform(X_bb)\n y_pca = np.hstack([y, np.repeat(-1, len(bounding_box))])\n\n dm = pairwise_distances(X_pca)\n to_remove = []\n for i in range(len(dm)):\n for j in range(i+1, len(dm)):\n if dm[i, j] < 0.001:\n to_remove.append(i)\n X_pca = np.delete(X_pca, to_remove, axis=0)\n y_pca = np.delete(y_pca, to_remove)\n\n # doing the Voronoi tessellation\n voronoi = sspatial.Voronoi(X_pca)\n\n # extracting those ridge point pairs which are candidates for\n # generating an edge between two cells of different class labels\n candidate_face_generators = []\n for i, r in enumerate(voronoi.ridge_points):\n if r[0] < len(y) and r[1] < len(y) and not y[r[0]] == y[r[1]]:\n candidate_face_generators.append(i)\n\n if len(candidate_face_generators) == 0:\n return X.copy(), y.copy()\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n # randomly choosing a pair from the ridge point pairs of different\n # labels\n random_face = self.random_state.choice(candidate_face_generators)\n\n # extracting the vertices of the face between the points\n ridge_vertices = voronoi.ridge_vertices[random_face]\n face_vertices = voronoi.vertices[ridge_vertices]\n\n # creating a random vector for sampling the face (supposed to be\n # convex)\n w = 
self.random_state.random_sample(size=len(X_pca[0]))\n w = w/np.sum(w)\n\n # initiating a sample point on the face\n sample_point_on_face = np.zeros(len(X_pca[0]))\n for i in range(len(X_pca[0])):\n sample_point_on_face += w[i]*face_vertices[i]\n\n # finding the ridge point with the minority label\n if y[voronoi.ridge_points[random_face][0]] == self.min_label:\n h = voronoi.points[voronoi.ridge_points[random_face][0]]\n else:\n h = voronoi.points[voronoi.ridge_points[random_face][1]]\n\n # generating a point between the minority ridge point and the\n # random point on the face\n samples.append(self.sample_between_points(sample_point_on_face,\n h))\n\n return (np.vstack([X, pca.inverse_transform(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass OUPS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{oups,\n title = \"A priori synthetic over-sampling methods for\n increasing classification sensitivity in\n imbalanced data sets\",\n journal = \"Expert Systems with Applications\",\n volume = \"66\",\n pages = \"124 - 135\",\n year = \"2016\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2016.09.010\",\n author = \"William A. Rivera and Petros Xanthopoulos\",\n keywords = \"SMOTE, OUPS, Class imbalance,\n Classification\"\n }\n\n Notes:\n * In the description of the algorithm a fractional number p (j) is\n used to index a vector.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if self.class_stats[self.min_label] < 2:\n message = (\"The number of minority samples (%d) is not enough for\"\n \" sampling\")\n message = message % self.class_stats[self.min_label]\n _logger.warning(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting propensity scores\n lr = LogisticRegression(solver='lbfgs',\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n lr.fit(X, y)\n propensity = lr.predict_proba(X)\n propensity = propensity[:, np.where(\n lr.classes_ == self.min_label)[0][0]]\n\n # sorting indices according to propensity scores\n prop_sorted = sorted(zip(propensity, np.arange(\n len(propensity))), key=lambda x: -x[0])\n\n p = np.sum(y == self.maj_label)/np.sum(y == self.min_label)\n n = 0\n samples = []\n # implementing Algorithm 1 in the cited paper with some minor changes\n # to enable the proper sampling of p numbers\n while n < len(propensity) and len(samples) < n_to_sample:\n if (y[prop_sorted[n][1]] == self.min_label\n and n < len(propensity) - 1):\n num = 1\n p_tmp = p\n while p_tmp > 0 and n + num < len(propensity):\n if self.random_state.random_sample() < p_tmp:\n samples.append(self.sample_between_points(\n X[prop_sorted[n][1]], X[prop_sorted[n+num][1]]))\n p_tmp = p_tmp - 1\n num = num + 1\n n = n + 1\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_D(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{smote_d,\n author=\"Torres, Fredy Rodr{\\'i}guez\n and Carrasco-Ochoa, Jes{\\'u}s A.\n and Mart{\\'i}nez-Trinidad, Jos{\\'e} Fco.\",\n editor=\"Mart{\\'i}nez-Trinidad, Jos{\\'e} Francisco\n and Carrasco-Ochoa, Jes{\\'u}s Ariel\n and Ayala Ramirez, Victor\n and Olvera-L{\\'o}pez, Jos{\\'e} Arturo\n and Jiang, Xiaoyi\",\n title=\"SMOTE-D a 
Deterministic Version of SMOTE\",\n booktitle=\"Pattern Recognition\",\n year=\"2016\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"177--188\",\n isbn=\"978-3-319-39393-3\"\n }\n\n Notes:\n * Copying happens if two points are the neighbors of each other.\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self, proportion=1.0, k=3, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n k (int): number of neighbors in nearest neighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k = k\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_min), self.k+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting standard deviations of distances\n stds = np.std(dist[:, 1:], axis=1)\n\n # estimating sampling density\n if np.sum(stds) > 0:\n p_i = stds/np.sum(stds)\n else:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"zero distribution\")\n return X.copy(), y.copy()\n\n # the other component of sampling density\n p_ij = dist[:, 1:]/np.sum(dist[:, 1:], axis=1)[:, None]\n\n # number of samples to generate between minority points\n counts_ij = n_to_sample*p_i[:, None]*p_ij\n\n # do the sampling\n samples = []\n for i in range(len(p_i)):\n for j in range(min([len(X_min)-1, self.k])):\n while counts_ij[i][j] > 0:\n if self.random_state.random_sample() < counts_ij[i][j]:\n translation = X_min[ind[i][j+1]] - X_min[i]\n weight = counts_ij[i][j] + 1\n samples.append(\n X_min[i] + translation/counts_ij[i][j]+1)\n counts_ij[i][j] = counts_ij[i][j] - 1\n\n if len(samples) > 0:\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n else:\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the 
current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_PSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_pso,\n title = \"PSO-based method for SVM classification on\n skewed data sets\",\n journal = \"Neurocomputing\",\n volume = \"228\",\n pages = \"187 - 197\",\n year = \"2017\",\n note = \"Advanced Intelligent Computing: Theory and\n Applications\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2016.10.041\",\n author = \"Jair Cervantes and Farid Garcia-Lamont and\n Lisbeth Rodriguez and Asdrúbal López and\n José Ruiz Castilla and Adrian Trueba\",\n keywords = \"Skew data sets, SVM, Hybrid algorithms\"\n }\n\n Notes:\n * I find the description of the technique a bit confusing, especially\n on the bounds of the search space of velocities and positions.\n Equations 15 and 16 specify the lower and upper bounds, the lower\n bound is in fact a vector while the upper bound is a distance.\n I tried to implement something meaningful.\n * I also find the setting of accelerating constant 2.0 strange, most\n of the time the velocity will be bounded due to this choice.\n * Also, training and predicting probabilities with a non-linear\n SVM as the evaluation function becomes fairly expensive when the\n number of training vectors reaches a couple of thousands. To\n reduce computational burden, minority and majority vectors far\n from the other class are removed to reduce the size of both\n classes to a maximum of 500 samples. Generally, this shouldn't\n really affect the results as the technique focuses on the samples\n near the class boundaries.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n k=3,\n eps=0.05,\n n_pop=10,\n w=1.0,\n c1=2.0,\n c2=2.0,\n num_it=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n k (int): number of neighbors in nearest neighbors component, this\n is also the multiplication factor of minority support\n vectors\n eps (float): use to specify the initially generated support\n vectors along minority-majority lines\n n_pop (int): size of population\n w (float): intertia constant\n c1 (float): acceleration constant of local optimum\n c2 (float): acceleration constant of population optimum\n num_it (int): number of iterations\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_greater(eps, \"eps\", 0)\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_greater_or_equal(w, \"w\", 0)\n self.check_greater_or_equal(c1, \"c1\", 0)\n self.check_greater_or_equal(c2, \"c2\", 0)\n self.check_greater_or_equal(num_it, \"num_it\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.k = k\n self.eps = eps\n self.n_pop = n_pop\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.num_it = num_it\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({'k': [3, 5, 7],\n 'eps': [0.05],\n 'n_pop': [5],\n 'w': [0.5, 1.0],\n 'c1': [1.0, 2.0],\n 'c2': [1.0, 2.0],\n 'num_it': [5]}, raw)\n\n 
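    # The sample() method below searches for good synthetic minority points
    # with a particle swarm: each particle is a full set of candidate points,
    # and velocities are updated with the usual PSO rule
    #
    #     v_i <- w*v_i + c1*r1*(local_best_i - x_i) + c2*r2*(global_best - x_i)
    #     x_i <- x_i + v_i
    #
    # velocities and positions are then clipped to the per-point search space
    # derived from the minority/majority support vector pairs, and every
    # particle is scored by the ROC AUC of an SVC refitted on the data set
    # extended with the particle's points.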
def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # saving original dataset\n X_orig = X\n y_orig = y\n\n # scaling the records\n mms = MinMaxScaler()\n X_scaled = mms.fit_transform(X)\n\n # removing majority and minority samples far from the training data if\n # needed to increase performance\n performance_threshold = 500\n\n n_maj_to_remove = np.sum(\n y == self.maj_label) - performance_threshold\n if n_maj_to_remove > 0:\n # if majority samples are to be removed\n nn = NearestNeighbors(n_neighbors=1,\n n_jobs=self.n_jobs)\n nn.fit(X_scaled[y == self.min_label])\n dist, ind = nn.kneighbors(X_scaled)\n di = sorted([(dist[i][0], i)\n for i in range(len(ind))], key=lambda x: x[0])\n to_remove = []\n # finding the proper number of samples farest from the minority\n # samples\n for i in reversed(range(len(di))):\n if y[di[i][1]] == self.maj_label:\n to_remove.append(di[i][1])\n if len(to_remove) >= n_maj_to_remove:\n break\n # removing the samples\n X_scaled = np.delete(X_scaled, to_remove, axis=0)\n y = np.delete(y, to_remove)\n\n n_min_to_remove = np.sum(\n y == self.min_label) - performance_threshold\n if n_min_to_remove > 0:\n # if majority samples are to be removed\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_scaled[y == self.maj_label])\n dist, ind = nn.kneighbors(X_scaled)\n di = sorted([(dist[i][0], i)\n for i in range(len(ind))], key=lambda x: x[0])\n to_remove = []\n # finding the proper number of samples farest from the minority\n # samples\n for i in reversed(range(len(di))):\n if y[di[i][1]] == self.min_label:\n to_remove.append(di[i][1])\n if len(to_remove) >= n_min_to_remove:\n break\n # removing the samples\n X_scaled = np.delete(X_scaled, to_remove, axis=0)\n y = np.delete(y, to_remove)\n\n # fitting SVM to extract initial support vectors\n svc = SVC(kernel='rbf', probability=True,\n gamma='auto', random_state=self.random_state)\n svc.fit(X_scaled, y)\n\n # extracting the support vectors\n SV_min = np.array(\n [i for i in svc.support_ if y[i] == self.min_label])\n SV_maj = np.array(\n [i for i in svc.support_ if y[i] == self.maj_label])\n\n X_SV_min = X_scaled[SV_min]\n X_SV_maj = X_scaled[SV_maj]\n\n # finding nearest majority support vectors\n n_neighbors = min([len(X_SV_maj), self.k])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_SV_maj)\n dist, ind = nn.kneighbors(X_SV_min)\n\n # finding the initial particle and specifying the search space\n X_min_gen = []\n search_space = []\n init_velocity = []\n for i in range(len(SV_min)):\n for j in range(min([len(X_SV_maj), self.k])):\n min_vector = X_SV_min[i]\n maj_vector = X_SV_maj[ind[i][j]]\n # the upper bound of the search space if specified by the\n # closest majority support vector\n upper_bound = X_SV_maj[ind[i][0]]\n # the third element of the search space specification is\n # the distance of the vector and the closest\n # majority support vector, which specifies the radius of\n # the search\n norms = np.linalg.norm(min_vector - upper_bound)\n search_space.append([min_vector, maj_vector, norms])\n # initial particles\n X_min_gen.append(min_vector + self.eps *\n (maj_vector - min_vector))\n # initial velocities\n 
init_velocity.append(self.eps*(maj_vector - min_vector))\n\n X_min_gen = np.vstack(X_min_gen)\n init_velocity = np.vstack(init_velocity)\n\n # evaluates a specific particle\n def evaluate(X_train, y_train, X_test, y_test):\n \"\"\"\n Trains support vector classifier and evaluates it\n\n Args:\n X_train (np.matrix): training vectors\n y_train (np.array): target labels\n X_test (np.matrix): test vectors\n y_test (np.array): test labels\n \"\"\"\n svc.fit(X_train, y_train)\n y_pred = svc.predict_proba(X_test)[:, np.where(\n svc.classes_ == self.min_label)[0][0]]\n return roc_auc_score(y_test, y_pred)\n\n # initializing the particle swarm and the particle and population level\n # memory\n particle_swarm = [X_min_gen.copy() for _ in range(self.n_pop)]\n velocities = [init_velocity.copy() for _ in range(self.n_pop)]\n local_best = [X_min_gen.copy() for _ in range(self.n_pop)]\n local_best_scores = [0.0]*self.n_pop\n global_best = X_min_gen.copy()\n global_best_score = 0.0\n\n def evaluate_particle(X_scaled, p, y):\n X_extended = np.vstack([X_scaled, p])\n y_extended = np.hstack([y, np.repeat(self.min_label, len(p))])\n return evaluate(X_extended, y_extended, X_scaled, y)\n\n for i in range(self.num_it):\n _logger.info(self.__class__.__name__ + \": \" + \"Iteration %d\" % i)\n # evaluate population\n scores = [evaluate_particle(X_scaled, p, y)\n for p in particle_swarm]\n\n # update best scores\n for i, s in enumerate(scores):\n if s > local_best_scores[i]:\n local_best_scores[i] = s\n local_best[i] = particle_swarm[i]\n if s > global_best_score:\n global_best_score = s\n global_best = particle_swarm[i]\n\n # update velocities\n for i, p in enumerate(particle_swarm):\n term_0 = self.w*velocities[i]\n random_1 = self.random_state.random_sample()\n random_2 = self.random_state.random_sample()\n term_1 = self.c1*random_1*(local_best[i] - p)\n term_2 = self.c2*random_2*(global_best - p)\n\n velocities[i] = term_0 + term_1 + term_2\n\n # bound velocities according to search space constraints\n for v in velocities:\n for i in range(len(v)):\n v_i_norm = np.linalg.norm(v[i])\n if v_i_norm > search_space[i][2]/2.0:\n v[i] = v[i]/v_i_norm*search_space[i][2]/2.0\n\n # update positions\n for i, p in enumerate(particle_swarm):\n particle_swarm[i] = particle_swarm[i] + velocities[i]\n\n # bound positions according to search space constraints\n for p in particle_swarm:\n for i in range(len(p)):\n ss = search_space[i]\n\n trans_vector = p[i] - ss[0]\n trans_norm = np.linalg.norm(trans_vector)\n normed_trans = trans_vector/trans_norm\n\n if trans_norm > ss[2]:\n p[i] = ss[0] + normed_trans*ss[2]\n\n X_ret = np.vstack([X_orig, mms.inverse_transform(global_best)])\n y_ret = np.hstack(\n [y_orig, np.repeat(self.min_label, len(global_best))])\n\n return (X_ret, y_ret)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'k': self.k,\n 'eps': self.eps,\n 'n_pop': self.n_pop,\n 'w': self.w,\n 'c1': self.c1,\n 'c2': self.c2,\n 'num_it': self.num_it,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CURE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{cure_smote,\n author=\"Ma, Li\n and Fan, Suohai\",\n title=\"CURE-SMOTE algorithm and hybrid algorithm for\n feature selection and parameter optimization\n based on random forests\",\n journal=\"BMC Bioinformatics\",\n year=\"2017\",\n month=\"Mar\",\n day=\"14\",\n volume=\"18\",\n number=\"1\",\n pages=\"169\",\n issn=\"1471-2105\",\n 
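    # Illustrative usage of SMOTE_PSO (a sketch only; it assumes this module
    # has been imported so that SMOTE_PSO is in scope, and the toy data below
    # is invented purely for demonstration):
    #
    #     import numpy as np
    #     X = np.vstack([np.random.normal(0.0, 1.0, size=(90, 2)),
    #                    np.random.normal(2.0, 1.0, size=(10, 2))])
    #     y = np.hstack([np.repeat(0, 90), np.repeat(1, 10)])
    #     X_samp, y_samp = SMOTE_PSO(k=3, num_it=5,
    #                                random_state=5).sample(X, y)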
doi=\"10.1186/s12859-017-1578-z\",\n url=\"https://doi.org/10.1186/s12859-017-1578-z\"\n }\n\n Notes:\n * It is not specified how to determine the cluster with the\n \"slowest growth rate\"\n * All clusters can be removed as noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_clusters=5,\n noise_th=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_clusters (int): number of clusters to generate\n noise_th (int): below this number of elements the cluster is\n considered as noise\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 1)\n self.check_greater_or_equal(noise_th, \"noise_th\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_clusters = n_clusters\n self.noise_th = noise_th\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_clusters': [5, 10, 15],\n 'noise_th': [1, 3]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data\n mms = MinMaxScaler()\n X_scaled = mms.fit_transform(X)\n\n X_min = X_scaled[y == self.min_label]\n\n # initiating clustering\n clusters = [np.array([i]) for i in range(len(X_min))]\n dm = pairwise_distances(X_min)\n\n # setting the diagonal of the distance matrix to infinity\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # starting the clustering iteration\n iteration = 0\n while len(clusters) > self.n_clusters:\n iteration = iteration + 1\n\n # delete a cluster with slowest growth rate, determined by\n # the cluster size\n if iteration % self.n_clusters == 0:\n # extracting cluster sizes\n cluster_sizes = np.array([len(c) for c in clusters])\n # removing one of the clusters with the smallest size\n to_remove = np.where(cluster_sizes == np.min(cluster_sizes))[0]\n to_remove = self.random_state.choice(to_remove)\n del clusters[to_remove]\n # adjusting the distance matrix accordingly\n dm = np.delete(dm, to_remove, axis=0)\n dm = np.delete(dm, to_remove, axis=1)\n\n # finding the cluster pair with the smallest distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = 
min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # merging the clusters\n clusters[merge_a] = np.hstack(\n [clusters[merge_a], clusters[merge_b]])\n # removing one of them\n del clusters[merge_b]\n # adjusting the distances in the distance matrix\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]), axis=0)\n dm[:, merge_a] = dm[merge_a]\n # removing the row and column corresponding to one of\n # the merged clusters\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # updating the diagonal\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # removing clusters declared as noise\n to_remove = []\n for i in range(len(clusters)):\n if len(clusters[i]) < self.noise_th:\n to_remove.append(i)\n clusters = [clusters[i]\n for i in range(len(clusters)) if i not in to_remove]\n\n # all clusters can be noise\n if len(clusters) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"all clusters removed as noise\")\n return X.copy(), y.copy()\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n cluster_idx = self.random_state.randint(len(clusters))\n center = np.mean(X_min[clusters[cluster_idx]], axis=0)\n representative = X_min[self.random_state.choice(\n clusters[cluster_idx])]\n samples.append(self.sample_between_points(center, representative))\n\n return (np.vstack([X, mms.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_clusters': self.n_clusters,\n 'noise_th': self.noise_th,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SOMO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{somo,\n title = \"Self-Organizing Map Oversampling (SOMO) for\n imbalanced data set learning\",\n journal = \"Expert Systems with Applications\",\n volume = \"82\",\n pages = \"40 - 52\",\n year = \"2017\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2017.03.073\",\n author = \"Georgios Douzas and Fernando Bacao\"\n }\n\n Notes:\n * It is not specified how to handle those cases when a cluster contains\n 1 minority samples, the mean of within-cluster distances is set to\n 100 in these cases.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_grid=10,\n sigma=0.2,\n learning_rate=0.5,\n n_iter=100,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_grid (int): size of grid\n sigma (float): sigma of SOM\n learning_rate (float) learning rate of SOM\n n_iter (int): number of iterations\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_grid, 'n_grid', 2)\n self.check_greater(sigma, 'sigma', 0)\n self.check_greater(learning_rate, 'learning_rate', 0)\n self.check_greater_or_equal(n_iter, 'n_iter', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_grid = n_grid\n self.sigma = sigma\n self.learning_rate = learning_rate\n self.n_iter = n_iter\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_grid': [5, 9, 13],\n 'sigma': [0.4],\n 'learning_rate': [0.3, 0.5],\n 'n_iter': [100]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n N_inter = n_to_sample/2\n N_intra = n_to_sample/2\n\n import minisom\n\n # training SOM\n som = minisom.MiniSom(self.n_grid,\n self.n_grid,\n len(X[0]),\n sigma=self.sigma,\n learning_rate=self.learning_rate,\n random_seed=3)\n som.train_random(X, self.n_iter)\n\n # constructing the grid\n grid_min = {}\n grid_maj = {}\n for i in range(len(y)):\n tmp = som.winner(X[i])\n idx = (tmp[0], tmp[1])\n if idx not in grid_min:\n grid_min[idx] = []\n if idx not in grid_maj:\n grid_maj[idx] = []\n if y[i] == self.min_label:\n grid_min[idx].append(i)\n else:\n grid_maj[idx].append(i)\n\n # converting the grid to arrays\n for i in grid_min:\n grid_min[i] = np.array(grid_min[i])\n for i in grid_maj:\n grid_maj[i] = np.array(grid_maj[i])\n\n # filtering\n filtered = {}\n for i in grid_min:\n if i not in grid_maj:\n filtered[i] = True\n else:\n filtered[i] = (len(grid_maj[i]) + 1)/(len(grid_min[i])+1) < 1.0\n\n # computing densities\n densities = {}\n for i in filtered:\n if filtered[i]:\n if len(grid_min[i]) > 1:\n paird = pairwise_distances(X[grid_min[i]])\n densities[i] = len(grid_min[i])/np.mean(paird)**2\n else:\n densities[i] = 10\n\n # all clusters can be filtered\n if len(densities) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"all clusters filtered\")\n return X.copy(), y.copy()\n\n # computing neighbour densities, using 4 neighborhood\n neighbors = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n pair_densities = {}\n for i in densities:\n for n in neighbors:\n j = (i[0] + n[0], i[1] + n[1]),\n if j in 
densities:\n pair_densities[(i, j)] = densities[i] + densities[j]\n\n # computing weights\n density_keys = list(densities.keys())\n density_vals = np.array(list(densities.values()))\n\n # determining pair keys and density values\n pair_keys = list(pair_densities.keys())\n pair_vals = np.array(list(pair_densities.values()))\n\n # determining densities\n density_vals = (1.0/density_vals)/np.sum(1.0/density_vals)\n pair_dens_vals = (1.0/pair_vals)/np.sum(1.0/pair_vals)\n\n # computing num of samples to generate\n if len(pair_vals) > 0:\n dens_num = N_intra\n pair_num = N_inter\n else:\n dens_num = N_inter + N_intra\n pair_num = 0\n\n # generating the samples according to the extracted distributions\n samples = []\n while len(samples) < dens_num:\n cluster_idx = density_keys[self.random_state.choice(\n np.arange(len(density_keys)), p=density_vals)]\n cluster = grid_min[cluster_idx]\n sample_a, sample_b = self.random_state.choice(cluster, 2)\n samples.append(self.sample_between_points(\n X[sample_a], X[sample_b]))\n\n while len(samples) < pair_num:\n idx = pair_keys[self.random_state.choice(\n np.arange(len(pair_keys)), p=pair_dens_vals)]\n cluster_a = grid_min[idx[0]]\n cluster_b = grid_min[idx[1]]\n X_a = X[self.random_state.choice(cluster_a)]\n X_b = X[self.random_state.choice(cluster_b)]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_grid': self.n_grid,\n 'sigma': self.sigma,\n 'learning_rate': self.learning_rate,\n 'n_iter': self.n_iter,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ISOMAP_Hybrid(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{isomap_hybrid,\n author = {Gu, Qiong and Cai, Zhihua and Zhu, Li},\n title = {Classification of Imbalanced Data Sets by\n Using the Hybrid Re-sampling Algorithm\n Based on Isomap},\n booktitle = {Proceedings of the 4th International\n Symposium on Advances in\n Computation and Intelligence},\n series = {ISICA '09},\n year = {2009},\n isbn = {978-3-642-04842-5},\n location = {Huangshi, China},\n pages = {287--296},\n numpages = {10},\n doi = {10.1007/978-3-642-04843-2_31},\n acmid = {1691478},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n keywords = {Imbalanced data set, Isomap, NCR,\n Smote, re-sampling},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_components=3,\n smote_n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_components (int): number of components\n smote_n_neighbors (int): number of neighbors in SMOTE sampling\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_components, \"n_components\", 1)\n self.check_greater_or_equal(smote_n_neighbors, \"smote_n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.smote_n_neighbors = smote_n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_components': [2, 3, 4],\n 'smote_n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n self.isomap = Isomap(n_neighbors=self.n_neighbors,\n n_components=self.n_components,\n n_jobs=self.n_jobs)\n\n X_trans = self.isomap.fit_transform(X, y)\n\n X_sm, y_sm = SMOTE(proportion=self.proportion,\n n_neighbors=self.smote_n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X_trans, y)\n\n nc = NeighborhoodCleaningRule(n_jobs=self.n_jobs)\n return nc.remove_noise(X_sm, y_sm)\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transforms new data by the trained isomap\n\n Args:\n X (np.matrix): new data\n\n Returns:\n np.matrix: the transformed data\n \"\"\"\n return self.isomap.transform(X)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_components': self.n_components,\n 'smote_n_neighbors': self.smote_n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ce_smote,\n author={Chen, S. and Guo, G. 
and Chen, L.},\n booktitle={2010 IEEE 24th International\n Conference on Advanced Information\n Networking and Applications\n Workshops},\n title={A New Over-Sampling Method Based on\n Cluster Ensembles},\n year={2010},\n volume={},\n number={},\n pages={599-604},\n keywords={data mining;Internet;pattern\n classification;pattern clustering;\n over sampling method;cluster\n ensembles;classification method;\n imbalanced data handling;CE-SMOTE;\n clustering consistency index;\n cluster boundary minority samples;\n imbalanced public data set;\n Mathematics;Computer science;\n Electronic mail;Accuracy;Nearest\n neighbor searches;Application\n software;Data mining;Conferences;\n Web sites;Information retrieval;\n classification;imbalanced data\n sets;cluster ensembles;\n over-sampling},\n doi={10.1109/WAINA.2010.40},\n ISSN={},\n month={April}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n h=10,\n k=5,\n alpha=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n h (int): size of ensemble\n k (int): number of clusters/neighbors\n alpha (float): [0,1] threshold to select boundary samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(h, \"h\", 1)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_in_range(alpha, \"alpha\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.h = h\n self.k = k\n self.alpha = alpha\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'h': [5, 10, 15],\n 'k': [3, 5, 7],\n 'alpha': [0.2, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # do the clustering and labelling\n d = len(X[0])\n labels = []\n for _ in range(self.h):\n f = self.random_state.randint(int(d/2), d)\n features = self.random_state.choice(np.arange(d), f)\n n_clusters = min([len(X), self.k])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X[:, features])\n labels.append(kmeans.labels_)\n\n # do the cluster matching, clustering 0 will be considered the one 
to\n # match the others to the problem of finding cluster matching is\n # basically the \"assignment problem\"\n base_label = 0\n for i in range(len(labels)):\n if not i == base_label:\n cost_matrix = np.zeros(shape=(self.k, self.k))\n for j in range(self.k):\n mask_j = labels[base_label] == j\n for k in range(self.k):\n mask_k = labels[i] == k\n mask_jk = np.logical_and(mask_j, mask_k)\n cost_matrix[j, k] = np.sum(mask_jk)\n # solving the assignment problem\n row_ind, _ = soptimize.linear_sum_assignment(-cost_matrix)\n # doing the relabeling\n relabeling = labels[i].copy()\n for j in range(len(row_ind)):\n relabeling[labels[i] == k] = j\n labels[i] = relabeling\n\n # compute clustering consistency index\n labels = np.vstack(labels)\n cci = np.apply_along_axis(lambda x: max(\n set(x.tolist()), key=x.tolist().count), 0, labels)\n cci = np.sum(labels == cci, axis=0)\n cci = cci/self.h\n\n # determining minority boundary samples\n P_boundary = X[np.logical_and(\n y == self.min_label, cci < self.alpha)]\n\n # there might be no boundary samples\n if len(P_boundary) <= 1:\n _logger.warning(self.__class__.__name__ + \": \" + \"empty boundary\")\n return X.copy(), y.copy()\n\n # finding nearest neighbors of boundary samples\n n_neighbors = min([len(P_boundary), self.k])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(P_boundary)\n dist, ind = nn.kneighbors(P_boundary)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(ind))\n point_a = P_boundary[idx]\n point_b = P_boundary[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(point_a, point_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'h': self.h,\n 'k': self.k,\n 'alpha': self.alpha,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Edge_Det_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{Edge_Det_SMOTE,\n author={Kang, Y. and Won, S.},\n booktitle={ICCAS 2010},\n title={Weight decision algorithm for oversampling\n technique on class-imbalanced learning},\n year={2010},\n volume={},\n number={},\n pages={182-186},\n keywords={edge detection;learning (artificial\n intelligence);weight decision\n algorithm;oversampling technique;\n class-imbalanced learning;class\n imbalanced data problem;edge\n detection algorithm;spatial space\n representation;Classification\n algorithms;Image edge detection;\n Training;Noise measurement;Glass;\n Training data;Machine learning;\n Imbalanced learning;Classification;\n Weight decision;Oversampling;\n Edge detection},\n doi={10.1109/ICCAS.2010.5669889},\n ISSN={},\n month={Oct}}\n\n Notes:\n * This technique is very loosely specified.\n \"\"\"\n\n categories = [OverSampling.cat_density_based,\n OverSampling.cat_borderline,\n OverSampling.cat_extensive]\n\n def __init__(self, proportion=1.0, k=5, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n k (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k = k\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = len(X[0])\n X_min = X[y == self.min_label]\n\n # organizing class labels according to feature ranking\n magnitudes = np.zeros(len(X))\n for i in range(d):\n to_sort = zip(X[:, i], np.arange(len(X)), y)\n _, idx, label = zip(*sorted(to_sort, key=lambda x: x[0]))\n # extracting edge magnitudes in this dimension\n for j in range(1, len(idx)-1):\n magnitudes[idx[j]] = magnitudes[idx[j]] + \\\n (label[j-1] - label[j+1])**2\n\n # density estimation\n magnitudes = magnitudes[y == self.min_label]\n magnitudes = np.sqrt(magnitudes)\n magnitudes = magnitudes/np.sum(magnitudes)\n\n # fitting nearest neighbors models to minority samples\n n_neighbors = min([len(X_min), self.k+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.choice(np.arange(len(X_min)), p=magnitudes)\n X_a = X_min[idx]\n X_b = X_min[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CBSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{cbso,\n author=\"Barua, Sukarna\n and Islam, Md. 
Monirul\n and Murase, Kazuyuki\",\n editor=\"Lu, Bao-Liang\n and Zhang, Liqing\n and Kwok, James\",\n title=\"A Novel Synthetic Minority Oversampling\n Technique for Imbalanced Data Set\n Learning\",\n booktitle=\"Neural Information Processing\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"735--744\",\n isbn=\"978-3-642-24958-7\"\n }\n\n Notes:\n * Clusters containing 1 element induce cloning of samples.\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n C_p=1.3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n C_p (float): used to set the threshold of clustering\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(C_p, \"C_p\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.C_p = C_p\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'C_p': [0.8, 1.0, 1.3, 1.6]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find neighbors of minority points\n nn = NearestNeighbors(n_neighbors=self.n_neighbors + 1,\n n_jobs=self.n_jobs).fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the number of majority neighbors\n weights = [np.sum(y[ind[i][1:]] == self.maj_label)\n for i in range(len(X_min))]\n # determine distribution of generating data\n weights = weights/np.sum(weights)\n\n # do the clustering\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs).fit(X_min)\n d_avg = np.mean(nn.kneighbors(X_min)[0][:, 1])\n T_h = d_avg*self.C_p\n\n # initiating clustering\n clusters = [np.array([i]) for i in range(len(X_min))]\n dm = pairwise_distances(X_min)\n\n # setting the diagonal of the distance matrix to infinity\n for i in 
range(len(dm)):\n dm[i, i] = np.inf\n\n # starting the clustering iteration\n while True:\n # finding the cluster pair with the smallest distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # check termination conditions\n if dm[merge_a, merge_b] > T_h or len(dm) == 1:\n break\n\n # merging the clusters\n clusters[merge_a] = np.hstack(\n [clusters[merge_a], clusters[merge_b]])\n # removing one of them\n del clusters[merge_b]\n # adjusting the distances in the distance matrix\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]), axis=0)\n dm[:, merge_a] = dm[merge_a]\n # removing the row and column corresponding to one of the\n # merged clusters\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # updating the diagonal\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # extracting cluster labels\n labels = np.zeros(len(X_min)).astype(int)\n for i in range(len(clusters)):\n for j in clusters[i]:\n labels[j] = i\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)), p=weights)\n if len(clusters[labels[idx]]) <= 1:\n samples.append(X_min[idx])\n continue\n else:\n random_idx = self.random_state.choice(clusters[labels[idx]])\n while random_idx == idx:\n random_idx = self.random_state.choice(\n clusters[labels[idx]])\n samples.append(self.sample_between_points(\n X_min[idx], X_min[random_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'C_p': self.C_p,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass E_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{e_smote,\n author={Deepa, T. and Punithavalli, M.},\n booktitle={2011 3rd International Conference on\n Electronics Computer Technology},\n title={An E-SMOTE technique for feature selection\n in High-Dimensional Imbalanced Dataset},\n year={2011},\n volume={2},\n number={},\n pages={322-324},\n keywords={bioinformatics;data mining;pattern\n classification;support vector machines;\n E-SMOTE technique;feature selection;\n high-dimensional imbalanced dataset;\n data mining;bio-informatics;dataset\n balancing;SVM classification;micro\n array dataset;Feature extraction;\n Genetic algorithms;Support vector\n machines;Data mining;Machine learning;\n Bioinformatics;Cancer;Imbalanced\n dataset;Featue Selection;E-SMOTE;\n Support Vector Machine[SVM]},\n doi={10.1109/ICECTECH.2011.5941710},\n ISSN={},\n month={April}}\n\n Notes:\n * This technique is basically unreproducible. I try to implement\n something following the idea of applying some simple genetic\n algorithm for optimization.\n * In my best understanding, the technique uses evolutionary algorithms\n for feature selection and then applies vanilla SMOTE on the\n selected features only.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_memetic,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n min_features=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in the nearest neighbors\n component\n min_features (int): minimum number of features\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(min_features, \"min_features\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.min_features = min_features\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'min_features': [1, 2, 3]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n min_features = min(self.min_features, len(X[0]))\n\n if len(X) < 800:\n classifier = SVC(gamma='auto', random_state=self.random_state)\n else:\n classifier = DecisionTreeClassifier(\n max_depth=4, random_state=self.random_state)\n\n # parameters of the evolutionary algorithm\n n_generations = 50\n n_population = 5\n\n # creating initial mask\n mask = self.random_state.choice([True, False], len(X[0]), replace=True)\n # fixing if the mask doesn't contain any features\n if np.sum(mask) == 0:\n mask[self.random_state.randint(len(mask))] = True\n\n def crossover(mask_a, mask_b):\n \"\"\"\n Crossover operation for two masks\n\n Args:\n mask_a (np.array): binary mask 1\n mask_b (np.array): binary mask 2\n\n Returns:\n np.array: the result of crossover\n \"\"\"\n mask = mask_a.copy()\n for i in range(len(mask_b)):\n if self.random_state.randint(0, 2) == 0:\n mask[i] = mask_b[i]\n\n while np.sum(mask) < min_features:\n mask[self.random_state.randint(len(mask))] = True\n\n return mask\n\n def mutate(mask_old):\n \"\"\"\n Mutation operation for a mask\n\n Args:\n mask_old (np.array): binary mask\n\n Returns:\n np.array: the result of mutation\n \"\"\"\n mask = mask_old.copy()\n for i in range(len(mask)):\n if self.random_state.randint(0, 2) == 0:\n mask[i] = not mask[i]\n\n while np.sum(mask) < min_features:\n mask[self.random_state.randint(len(mask))] = True\n\n return mask\n\n # generating initial population\n population = [[0, mask.copy()] for _ in range(n_population)]\n for _ in range(n_generations):\n # in each generation\n for _ in range(n_population):\n # for each element of a population\n if self.random_state.randint(0, 2) == 0:\n # crossover\n i_0 = self.random_state.randint(n_population)\n i_1 = self.random_state.randint(n_population)\n mask = crossover(population[i_0][1], population[i_1][1])\n else:\n # mutation\n idx = self.random_state.randint(n_population)\n mask = mutate(population[idx][1])\n # evaluation\n 
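                # each candidate mask is scored by the training accuracy of
                # the classifier refitted on the selected features only; once
                # all candidates of this generation are scored, the population
                # is sorted by score and truncated to the n_population best
                # masks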
message = \"evaluating mask selection with features %d/%d\"\n message = message % (np.sum(mask), len(mask))\n _logger.info(self.__class__.__name__ + \": \" + message)\n classifier.fit(X[:, mask], y)\n score = np.sum(y == classifier.predict(X[:, mask]))/len(y)\n # appending the result to the population\n population.append([score, mask])\n # sorting the population in a reversed order and keeping the\n # elements with the highest scores\n population = sorted(population, key=lambda x: -x[0])[:n_population]\n\n self.mask = population[0][1]\n # resampling the population in the given dimensions\n\n smote = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n return smote.sample(X[:, self.mask], y)\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transform new data by the learnt transformation\n\n Args:\n X (np.matrix): new data\n\n Returns:\n np.matrix: transformed data\n \"\"\"\n return X[:, self.mask]\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'min_features': self.min_features,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DBSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{dbsmote,\n author=\"Bunkhumpornpat, Chumphol\n and Sinapiromsaran, Krung\n and Lursinsap, Chidchanok\",\n title=\"DBSMOTE: Density-Based Synthetic Minority\n Over-sampling TEchnique\",\n journal=\"Applied Intelligence\",\n year=\"2012\",\n month=\"Apr\",\n day=\"01\",\n volume=\"36\",\n number=\"3\",\n pages=\"664--684\",\n issn=\"1573-7497\",\n doi=\"10.1007/s10489-011-0287-y\",\n url=\"https://doi.org/10.1007/s10489-011-0287-y\"\n }\n\n Notes:\n * Standardization is needed to use absolute eps values.\n * The clustering is likely to identify all instances as noise, fixed\n by recursive call with increaseing eps.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based]\n\n def __init__(self,\n proportion=1.0,\n eps=0.8,\n min_samples=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n eps (float): eps paramter of DBSCAN\n min_samples (int): min_samples paramter of DBSCAN\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater(eps, \"eps\", 0)\n self.check_greater_or_equal(min_samples, \"min_samples\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.eps = eps\n self.min_samples = min_samples\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'eps': [0.5, 0.8, 1.2],\n 'min_samples': [1, 3, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n ss = StandardScaler().fit(X)\n X_ss = ss.transform(X)\n\n # doing the clustering using DBSCAN\n X_min = X_ss[y == self.min_label]\n db = DBSCAN(self.eps, self.min_samples, n_jobs=self.n_jobs).fit(X_min)\n labels = db.labels_\n num_labels = np.max(labels)+1\n\n if num_labels == 0:\n # adjusting the parameters if no clusters were identified\n message = (\"Number of clusters is 0, trying to increase eps and \"\n \"decrease min_samples\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n if self.eps >= 2 or self.min_samples <= 2:\n message = (\"Number of clusters is 0, can't adjust parameters \"\n \"further\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n else:\n return DBSMOTE(proportion=self.proportion,\n eps=self.eps*1.5,\n min_samples=self.min_samples-1,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # determining cluster size distribution\n clusters = [np.where(labels == i)[0] for i in range(num_labels)]\n cluster_sizes = np.array([np.sum(labels == i)\n for i in range(num_labels)])\n cluster_dist = cluster_sizes/np.sum(cluster_sizes)\n\n # Bellman-Ford algorithm, inspired by\n # https://gist.github.com/joninvski/701720\n def initialize(graph, source):\n \"\"\"\n Initializes shortest path algorithm.\n\n Args:\n graph (dict): graph in dictionary representation\n source (key): source node\n\n Returns:\n dict, dict: initialized distance and path dictionaries\n \"\"\"\n d = {}\n p = {}\n for node in graph:\n d[node] = float('Inf')\n p[node] = None\n d[source] = 0\n return d, p\n\n def relax(u, v, graph, d, p):\n \"\"\"\n Checks if shorter path exists.\n\n Args:\n u (key): key of a node\n v (key): key of another node\n graph (dict): the graph object\n d (dict): the 
distances dictionary\n p (dict): the paths dictionary\n \"\"\"\n if d[v] > d[u] + graph[u][v]:\n d[v] = d[u] + graph[u][v]\n p[v] = u\n\n def bellman_ford(graph, source):\n \"\"\"\n Main entry point of the Bellman-Ford algorithm\n\n Args:\n graph (dict): a graph in dictionary representation\n source (key): the key of the source node\n \"\"\"\n d, p = initialize(graph, source)\n for i in range(len(graph)-1):\n for u in graph:\n for v in graph[u]:\n relax(u, v, graph, d, p)\n for u in graph:\n for v in graph[u]:\n assert d[v] <= d[u] + graph[u][v]\n return d, p\n\n # extract graphs and center-like objects\n graphs = []\n centroid_indices = []\n shortest_paths = []\n for c in range(num_labels):\n # extracting the cluster elements\n cluster = X_min[clusters[c]]\n # initializing the graph object\n graph = {}\n for i in range(len(cluster)):\n graph[i] = {}\n\n # fitting nearest neighbors model to the cluster elements\n nn = NearestNeighbors(n_neighbors=len(cluster), n_jobs=self.n_jobs)\n nn.fit(cluster)\n dist, ind = nn.kneighbors(cluster)\n\n # extracting graph edges according to directly density reachabality\n # definition\n for i in range(len(cluster)):\n n = min([len(cluster), (self.min_samples + 1)])\n index_set = ind[i][1:n]\n for j in range(len(cluster)):\n if j in index_set and dist[i][ind[i] == j][0] < self.eps:\n graph[i][j] = dist[i][ind[i] == j][0]\n graphs.append(graph)\n # finding the index of the center like object\n centroid_ind = nn.kneighbors(\n np.mean(cluster, axis=0).reshape(1, -1))[1][0][0]\n centroid_indices.append(centroid_ind)\n # extracting shortest paths from centroid object\n shortest_paths.append(bellman_ford(graph, centroid_ind))\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(clusters)), p=cluster_dist)\n cluster = X_min[clusters[cluster_idx]]\n idx = self.random_state.choice(range(len(clusters[cluster_idx])))\n\n # executing shortest path algorithm\n distances, parents = shortest_paths[cluster_idx]\n\n # extracting path\n path = [idx]\n while not parents[path[-1]] is None:\n path.append(parents[path[-1]])\n\n if len(path) == 1:\n # if the center like object is selected\n samples.append(cluster[path[0]])\n elif len(path) == 2:\n # if the path consists of 1 edge\n X_a = cluster[path[0]]\n X_b = cluster[path[1]]\n sample = self.sample_between_points_componentwise(X_a, X_b)\n samples.append(sample)\n else:\n # if the path consists of at least two edges\n random_vertex = self.random_state.randint(len(path)-1)\n X_a = cluster[path[random_vertex]]\n X_b = cluster[path[random_vertex + 1]]\n sample = self.sample_between_points_componentwise(X_a, X_b)\n samples.append(sample)\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'eps': self.eps,\n 'min_samples': self.min_samples,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ASMOBD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{asmobd,\n author={Senzhang Wang and Zhoujun Li and Wenhan\n Chao and Qinghua Cao},\n booktitle={The 2012 International Joint Conference\n on Neural Networks (IJCNN)},\n title={Applying adaptive over-sampling technique\n based on data density and cost-sensitive\n SVM to imbalanced learning},\n year={2012},\n volume={},\n 
number={},\n pages={1-8},\n doi={10.1109/IJCNN.2012.6252696},\n ISSN={2161-4407},\n month={June}}\n\n Notes:\n * In order to use absolute thresholds, the data is standardized.\n * The technique has many parameters, not easy to find the right\n combination.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n min_samples=3,\n eps=0.8,\n eta=0.5,\n T_1=1.0,\n T_2=1.0,\n t_1=4.0,\n t_2=4.0,\n a=0.05,\n smoothing='linear',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n min_samples (int): parameter of OPTICS\n eps (float): parameter of OPTICS\n eta (float): tradeoff paramter\n T_1 (float): noise threshold (see paper)\n T_2 (float): noise threshold (see paper)\n t_1 (float): noise threshold (see paper)\n t_2 (float): noise threshold (see paper)\n a (float): smoothing factor (see paper)\n smoothing (str): 'sigmoid'/'linear'\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(min_samples, \"min_samples\", 1)\n self.check_greater(eps, \"eps\", 0)\n self.check_in_range(eta, \"eta\", [0, 1])\n self.check_greater(T_1, \"T_1\", 0)\n self.check_greater(T_2, \"T_2\", 0)\n self.check_greater(t_1, \"t_1\", 0)\n self.check_greater(t_2, \"t_2\", 0)\n self.check_greater(a, \"a\", 0)\n self.check_isin(smoothing, \"smoothing\", ['sigmoid', 'linear'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.min_samples = min_samples\n self.eps = eps\n self.eta = eta\n self.T_1 = T_1\n self.T_2 = T_2\n self.t_1 = t_1\n self.t_2 = t_2\n self.a = a\n self.smoothing = smoothing\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'min_samples': [3],\n 'eps': [0.3],\n 'eta': [0.5],\n 'T_1': [0.7, 1.0, 1.4],\n 'T_2': [0.7, 1.0, 1.4],\n 't_1': [4.0],\n 't_2': [4.0],\n 'a': [0.05, 0.1],\n 'smoothing': ['sigmoid', 'linear']}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data to enable using absolute thresholds\n ss = StandardScaler().fit(X)\n X_ss = ss.transform(X)\n\n X_min = X_ss[y == self.min_label]\n\n # executing the optics algorithm\n min_samples = 
min([len(X_min)-1, self.min_samples])\n o = OPTICS(min_samples=min_samples,\n max_eps=self.eps,\n n_jobs=self.n_jobs)\n o.fit(X_min)\n cd = o.core_distances_\n r = o.reachability_\n\n # identifying noise\n noise = np.logical_and(cd > self.T_1, r > self.T_2)\n\n # fitting nearest neighbors models to identify the number of majority\n # samples in local environments\n nn = NearestNeighbors(n_neighbors=self.min_samples, n_jobs=self.n_jobs)\n nn.fit(X_ss)\n n_majs = []\n ratio = []\n for i in range(len(X_min)):\n ind = nn.radius_neighbors(X_min[i].reshape(\n 1, -1), radius=cd[i], return_distance=False)[0]\n n_maj = np.sum(y[ind] == self.maj_label)/len(ind)\n n_majs.append(n_maj)\n n_min = len(ind) - n_maj - 1\n if n_min == 0:\n ratio.append(np.inf)\n else:\n ratio.append(n_maj/n_min)\n\n n_maj = np.array(n_maj)\n ratio = np.array(ratio)\n\n # second constraint on noise\n noise_2 = np.logical_and(cd > np.mean(\n cd)*self.t_1, r > np.mean(r)*self.t_2)\n\n # calculating density according to the smoothing function specified\n if self.smoothing == 'sigmoid':\n balance_ratio = np.abs(2.0/(1.0 + np.exp(-self.a*ratio[i])) - 1.0)\n df = self.eta*cd + (1.0 - self.eta)*n_maj - balance_ratio\n else:\n df = self.eta*(self.eta*cd + (1.0 - self.eta)*n_maj) + \\\n (1 - self.eta)*len(X_min)/n_to_sample\n\n # unifying the conditions on noise\n not_noise = np.logical_not(np.logical_or(noise, noise_2))\n\n # checking if there are not noise samples remaining\n if np.sum(not_noise) == 0:\n message = (\"All minority samples found to be noise, increasing\"\n \"noise thresholds\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n return ASMOBD(proportion=self.proportion,\n min_samples=self.min_samples,\n eps=self.eps,\n eta=self.eta,\n T_1=self.T_1*1.5,\n T_2=self.T_2*1.5,\n t_1=self.t_1*1.5,\n t_2=self.t_2*1.5,\n a=self.a,\n smoothing=self.smoothing,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # removing noise and adjusting the density factors accordingly\n X_min_not_noise = X_min[not_noise]\n\n # checking if there are not-noisy samples\n if len(X_min_not_noise) <= 2:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"no not-noise minority sample remained\")\n return X.copy(), y.copy()\n\n df = np.delete(df, np.where(np.logical_not(not_noise))[0])\n density = df/np.sum(df)\n\n # fitting nearest neighbors model to non-noise minority samples\n n_neighbors = min([len(X_min_not_noise), self.min_samples + 1])\n nn_not_noise = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn_not_noise.fit(X_min_not_noise)\n dist, ind = nn_not_noise.kneighbors(X_min_not_noise)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min_not_noise)),\n p=density)\n random_neighbor_idx = self.random_state.choice(ind[idx][1:])\n X_a = X_min_not_noise[idx]\n X_b = X_min_not_noise[random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'min_samples': self.min_samples,\n 'eps': self.eps,\n 'eta': self.eta,\n 'T_1': self.T_1,\n 'T_2': self.T_2,\n 't_1': self.t_1,\n 't_2': self.t_2,\n 'a': self.a,\n 'smoothing': self.smoothing,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass 
Assembled_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{assembled_smote,\n author={Zhou, B. and Yang, C. and Guo, H. and\n Hu, J.},\n booktitle={The 2013 International Joint Conference\n on Neural Networks (IJCNN)},\n title={A quasi-linear SVM combined with assembled\n SMOTE for imbalanced data classification},\n year={2013},\n volume={},\n number={},\n pages={1-7},\n keywords={approximation theory;interpolation;\n pattern classification;sampling\n methods;support vector machines;trees\n (mathematics);quasilinear SVM;\n assembled SMOTE;imbalanced dataset\n classification problem;oversampling\n method;quasilinear kernel function;\n approximate nonlinear separation\n boundary;mulitlocal linear boundaries;\n interpolation;data distribution\n information;minimal spanning tree;\n local linear partitioning method;\n linear separation boundary;synthetic\n minority class samples;oversampled\n dataset classification;standard SVM;\n composite quasilinear kernel function;\n artificial data datasets;benchmark\n datasets;classification performance\n improvement;synthetic minority\n over-sampling technique;Support vector\n machines;Kernel;Merging;Standards;\n Sociology;Statistics;Interpolation},\n doi={10.1109/IJCNN.2013.6707035},\n ISSN={2161-4407},\n month={Aug}}\n\n Notes:\n * Absolute value of the angles extracted should be taken.\n (implemented this way)\n * It is not specified how many samples are generated in the various\n clusters.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_borderline,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n pop=2,\n thres=0.3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n pop (int): lower threshold on cluster sizes\n thres (float): threshold on angles\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(pop, \"pop\", 1)\n self.check_in_range(thres, \"thres\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.pop = pop\n self.thres = thres\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'pop': [2, 4, 5],\n 'thres': [0.1, 0.3, 0.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # finding the set of border and non-border minority elements\n n_min_neighbors = [np.sum(y[ind[i]] == self.min_label)\n for i in range(len(ind))]\n border_mask = np.logical_not(np.array(n_min_neighbors) == n_neighbors)\n X_border = X_min[border_mask]\n X_non_border = X_min[np.logical_not(border_mask)]\n\n if len(X_border) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"X_border is empty\")\n return X.copy(), y.copy()\n\n # initializing clustering\n clusters = [np.array([i]) for i in range(len(X_border))]\n dm = pairwise_distances(X_border)\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # do the clustering\n while len(dm) > 1 and np.min(dm) < np.inf:\n # extracting coordinates of clusters with the minimum distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # checking the size of clusters to see if they should be merged\n if (len(clusters[merge_a]) < self.pop\n or len(clusters[merge_b]) < self.pop):\n # if both clusters are small, do the merge\n clusters[merge_a] = np.hstack([clusters[merge_a],\n clusters[merge_b]])\n del clusters[merge_b]\n # update the distance matrix accordingly\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]),\n axis=0)\n dm[:, merge_a] = dm[merge_a]\n # remove columns\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, 
axis=1)\n # fix the diagonal entries\n for i in range(len(dm)):\n dm[i, i] = np.inf\n else:\n # otherwise find principal directions\n pca_a = PCA(n_components=1).fit(X_border[clusters[merge_a]])\n pca_b = PCA(n_components=1).fit(X_border[clusters[merge_b]])\n # extract the angle of principal directions\n numerator = np.dot(pca_a.components_[0], pca_b.components_[0])\n denominator = np.linalg.norm(pca_a.components_[0])\n denominator *= np.linalg.norm(pca_b.components_[0])\n angle = abs(numerator/denominator)\n # check if angle if angle is above a specific threshold\n if angle > self.thres:\n # do the merge\n clusters[merge_a] = np.hstack([clusters[merge_a],\n clusters[merge_b]])\n del clusters[merge_b]\n # update the distance matrix acoordingly\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]),\n axis=0)\n dm[:, merge_a] = dm[merge_a]\n # remove columns\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # fixing the digaonal entries\n for i in range(len(dm)):\n dm[i, i] = np.inf\n else:\n # changing the distance of clusters to fininte\n dm[merge_a, merge_b] = np.inf\n dm[merge_b, merge_a] = np.inf\n\n # extract vectors belonging to the various clusters\n vectors = [X_border[c] for c in clusters if len(c) > 0]\n # adding non-border samples\n if len(X_non_border) > 0:\n vectors.append(X_non_border)\n\n # extract cluster sizes and calculating point distribution in clusters\n # the last element of the clusters is the set of non-border xamples\n cluster_sizes = np.array([len(v) for v in vectors])\n densities = cluster_sizes/np.sum(cluster_sizes)\n\n # extracting nearest neighbors in clusters\n def fit_knn(vectors):\n n_neighbors = min([self.n_neighbors + 1, len(vectors)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n return nn.fit(vectors).kneighbors(vectors)\n\n nns = [fit_knn(v) for v in vectors]\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(len(vectors), p=densities)\n len_cluster = len(vectors[cluster_idx])\n sample_idx = self.random_state.choice(np.arange(len_cluster))\n\n if len_cluster > 1:\n choose_from = nns[cluster_idx][1][sample_idx][1:]\n random_neighbor_idx = self.random_state.choice(choose_from)\n else:\n random_neighbor_idx = sample_idx\n\n X_a = vectors[cluster_idx][sample_idx]\n X_b = vectors[cluster_idx][random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'pop': self.pop,\n 'thres': self.thres,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SDSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{sdsmote,\n author={Li, K. and Zhang, W. and Lu, Q. 
and\n Fang, X.},\n booktitle={2014 International Conference on\n Identification, Information and\n Knowledge in the Internet of\n Things},\n title={An Improved SMOTE Imbalanced Data\n Classification Method Based on Support\n Degree},\n year={2014},\n volume={},\n number={},\n pages={34-38},\n keywords={data mining;pattern classification;\n sampling methods;improved SMOTE\n imbalanced data classification\n method;support degree;data mining;\n class distribution;imbalanced\n data-set classification;over sampling\n method;minority class sample\n generation;minority class sample\n selection;minority class boundary\n sample identification;Classification\n algorithms;Training;Bagging;Computers;\n Testing;Algorithm design and analysis;\n Data mining;Imbalanced data-sets;\n Classification;Boundary sample;Support\n degree;SMOTE},\n doi={10.1109/IIKI.2014.14},\n ISSN={},\n month={Oct}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model to find closest majority points to\n # minority samples\n nn = NearestNeighbors(n_neighbors=len(X_maj), n_jobs=self.n_jobs)\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n\n # calculating the sum according to S3 in the paper\n S_i = np.sum(dist, axis=1)\n # calculating average distance according to S5\n S = np.sum(S_i)\n S_ave = S/(len(X_min)*len(X_maj))\n\n # calculate support degree\n def support_degree(x):\n return 
len(nn.radius_neighbors(x.reshape(1, -1),\n S_ave,\n return_distance=False))\n\n k = np.array([support_degree(X_min[i]) for i in range(len(X_min))])\n density = k/np.sum(k)\n\n # fitting nearest neighbors model to minority samples to run\n # SMOTE-like sampling\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(density)), p=density)\n random_neighbor_idx = self.random_state.choice(ind[idx][1:])\n X_a = X_min[idx]\n X_b = X_min[random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{dsmote,\n author={Mahmoudi, S. and Moradi, P. and Akhlaghian,\n F. and Moradi, R.},\n booktitle={2014 4th International Conference on\n Computer and Knowledge Engineering\n (ICCKE)},\n title={Diversity and separable metrics in\n over-sampling technique for imbalanced\n data classification},\n year={2014},\n volume={},\n number={},\n pages={152-158},\n keywords={learning (artificial intelligence);\n pattern classification;sampling\n methods;diversity metric;separable\n metric;over-sampling technique;\n imbalanced data classification;\n class distribution techniques;\n under-sampling technique;DSMOTE method;\n imbalanced learning problem;diversity\n measure;separable measure;Iran\n University of Medical Science;UCI\n dataset;Accuracy;Classification\n algorithms;Vectors;Educational\n institutions;Euclidean distance;\n Data mining;Diversity measure;\n Separable Measure;Over-Sampling;\n Imbalanced Data;Classification\n problems},\n doi={10.1109/ICCKE.2014.6993409},\n ISSN={},\n month={Oct}}\n\n Notes:\n * The method is highly inefficient when the number of minority samples\n is high, time complexity is O(n^3), with 1000 minority samples it\n takes about 1e9 objective function evaluations to find 1 new sample\n points. Adding 1000 samples would take about 1e12 evaluations of\n the objective function, which is unfeasible. 
We introduce a new\n parameter, n_step, and during the search for the new sample at\n most n_step combinations of minority samples are tried.\n * Abnormality of minority points is defined in the paper as\n D_maj/D_min, high abnormality means that the minority point is\n close to other minority points and very far from majority points.\n This is definitely not abnormality,\n I have implemented the opposite.\n * Nothing ensures that the fisher statistics and the variance from\n the geometric mean remain comparable, which might skew the\n optimization towards one of the sub-objectives.\n * MinMax normalization doesn't work, each attribute will have a 0\n value, which will make the geometric mean of all attribute 0.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n rate=0.1,\n n_step=50,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n rate (float): [0,1] rate of minority samples to turn into majority\n n_step (int): number of random configurations to check for new\n samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(rate, \"rate\", [0, 1])\n self.check_greater_or_equal(n_step, \"n_step\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.rate = rate\n self.n_step = n_step\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'rate': [0.1, 0.2],\n 'n_step': [50]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n mms = MinMaxScaler(feature_range=(1e-6, 1.0 - 1e-6))\n X = mms.fit_transform(X)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model\n nn = NearestNeighbors(n_neighbors=len(X_maj))\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n\n # compute mean distances, the D_min is compenstaed for taking into\n # consideration self-distances in the mean\n D_maj = 
np.mean(dist, axis=1)\n D_min = np.mean(pairwise_distances(X_min), axis=1) * \\\n len(X_min)/(len(X_min)-1)\n\n # computing degree of abnormality\n abnormality = D_min/D_maj\n\n # sorting minority indices in decreasing order by abnormality\n to_sort = zip(abnormality, np.arange(len(abnormality)))\n abnormality, indices = zip(*sorted(to_sort, key=lambda x: -x[0]))\n rate = int(self.rate*len(abnormality))\n\n if rate > 0:\n # moving the most abnormal points to the majority class\n X_maj = np.vstack([X_maj, X_min[np.array(indices[:rate])]])\n # removing the most abnormal points form the minority class\n X_min = np.delete(X_min, indices[:rate], axis=0)\n\n # computing the mean and variance of points in the majority class\n var_maj = np.mean(np.var(X_maj, axis=0))\n mean_maj = np.mean(X_maj)\n\n # this is the original objective function, however, using this\n # is very inefficient if the number of records increases above\n # approximately 1000\n # def objective(X):\n # \"\"\"\n # The objective function to be maximized\n #\n # Args:\n # X (np.matrix): dataset\n #\n # Returns:\n # float: the value of the objective function\n # \"\"\"\n # gm= gmean(X, axis= 0)\n # gdiv= np.mean(np.linalg.norm(X - gm, axis= 1))\n # fisher= (np.mean(X) - mean_maj)**2/(np.mean(np.var(X, axis= 0)) \\\n # + var_maj)\n # return gdiv + fisher\n\n # in order to make the code more efficient, we do maintain some\n # variables containing the main componentes of the objective function\n # and apply only small corrections based on the new values being added\n # the effect should be identical\n\n # records the sum of logarithms in X_min, used to compute the geometric\n # mean\n min_log_sum = np.sum(np.log(X_min), axis=0)\n # contains the sum of values in X_min, coordinatewise\n min_sum = np.sum(X_min, axis=0)\n # contains the squares of sums of values in X_min, coordinatewise\n min_sum2 = np.sum(X_min**2, axis=0)\n # contains the sum of all numbers in X_min\n min_all_sum = np.sum(X_min)\n\n min_norm = np.linalg.norm(X_min)**2\n\n # do the sampling\n n_added = 0\n while n_added < n_to_sample:\n best_candidate = None\n highest_score = 0.0\n # we try n_step combinations of minority samples\n len_X = len(X_min)\n n_steps = min([len_X*(len_X-1)*(len_X-2), self.n_step])\n for _ in range(n_steps):\n i, j, k = self.random_state.choice(np.arange(len_X),\n 3,\n replace=False)\n gm = gmean(X_min[np.array([i, j, k])], axis=0)\n\n # computing the new objective function for the new point (gm)\n # added\n new_X_min = np.vstack([X_min, gm])\n\n # updating the components of the objective function\n new_min_log_sum = min_log_sum + np.log(gm)\n new_min_sum = min_sum + gm\n new_min_sum2 = min_sum2 + gm**2\n new_min_all_sum = min_all_sum + np.sum(gm)\n\n # computing mean, var, gmean and mean of all elements with\n # the new sample (gm)\n new_min_mean = new_min_sum/(len(new_X_min))\n new_min_var = new_min_sum2/(len(new_X_min)) - new_min_mean**2\n new_min_gmean = np.exp(new_min_log_sum/(len(new_X_min)))\n new_min_all_n = (len(new_X_min))*len(X_min[0])\n new_min_all_mean = new_min_all_sum / new_min_all_n\n\n new_min_norm = min_norm + np.linalg.norm(gm)\n\n # computing the new objective function value\n inner_prod = np.dot(new_X_min, new_min_gmean)\n gmean_norm = np.linalg.norm(new_min_gmean)**2\n term_sum = new_min_norm - 2*inner_prod + gmean_norm\n new_gdiv = np.mean(np.sqrt(term_sum))\n\n fisher_numerator = (new_min_all_mean - mean_maj)**2\n fisher_denominator = np.mean(new_min_var) + var_maj\n new_fisher = fisher_numerator / fisher_denominator\n\n 
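                # Added illustrative note (not part of the original code): the
                # incremental bookkeeping above relies on the expansion
                #     ||x_i - gm||**2 = ||x_i||**2 - 2*<x_i, gm> + ||gm||**2,
                # which lets the diversity term gdiv = mean_i ||x_i - gm|| of
                # the commented-out objective() be evaluated without forming
                # X - gm explicitly.  A per-row sketch of that identity
                # (hypothetical names, kept as a comment only):
                #
                # row_sq_norms = np.einsum('ij,ij->i', new_X_min, new_X_min)
                # gdiv_check = np.mean(np.sqrt(
                #     row_sq_norms
                #     - 2.0 * new_X_min.dot(new_min_gmean)
                #     + np.linalg.norm(new_min_gmean)**2))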
score = new_gdiv + new_fisher\n\n # evaluate the objective function\n # score= objective(np.vstack([X_min, gm]))\n # check if the score is better than the best so far\n if score > highest_score:\n highest_score = score\n best_candidate = gm\n cand_min_log_sum = new_min_log_sum\n cand_min_sum = new_min_sum\n cand_min_sum2 = new_min_sum2\n cand_min_all_sum = new_min_all_sum\n cand_min_norm = new_min_norm\n\n # add the best candidate to the minority samples\n X_min = np.vstack([X_min, best_candidate])\n n_added = n_added + 1\n\n min_log_sum = cand_min_log_sum\n min_sum = cand_min_sum\n min_sum2 = cand_min_sum2\n min_all_sum = cand_min_all_sum\n min_norm = cand_min_norm\n\n return (mms.inverse_transform(np.vstack([X_maj, X_min])),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'rate': self.rate,\n 'n_step': self.n_step,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass G_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{g_smote,\n author={Sandhan, T. and Choi, J. Y.},\n booktitle={2014 22nd International Conference on\n Pattern Recognition},\n title={Handling Imbalanced Datasets by Partially\n Guided Hybrid Sampling for Pattern\n Recognition},\n year={2014},\n volume={},\n number={},\n pages={1449-1453},\n keywords={Gaussian processes;learning (artificial\n intelligence);pattern classification;\n regression analysis;sampling methods;\n support vector machines;imbalanced\n datasets;partially guided hybrid\n sampling;pattern recognition;real-world\n domains;skewed datasets;dataset\n rebalancing;learning algorithm;\n extremely low minority class samples;\n classification tasks;extracted hidden\n patterns;support vector machine;\n logistic regression;nearest neighbor;\n Gaussian process classifier;Support\n vector machines;Proteins;Pattern\n recognition;Kernel;Databases;Gaussian\n processes;Vectors;Imbalanced dataset;\n protein classification;ensemble\n classifier;bootstrapping;Sat-image\n classification;medical diagnoses},\n doi={10.1109/ICPR.2014.258},\n ISSN={1051-4651},\n month={Aug}}\n\n Notes:\n * the non-linear approach is inefficient\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n method='linear',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n method (str): 'linear'/'non-linear_2.0' - the float can be any\n number: standard deviation in the\n Gaussian-kernel\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n if not method == 'linear' and not method.startswith('non-linear'):\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Method parameter %s is not supported' % method)\n elif method.startswith('non-linear'):\n parameter = float(method.split('_')[-1])\n if parameter <= 0:\n message = (\"Non-positive non-linear parameter %f is \"\n \"not supported\") % parameter\n raise ValueError(self.__class__.__name__ + \": \" + message)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'method': ['linear', 'non-linear_0.1',\n 'non-linear_1.0',\n 'non-linear_2.0']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if self.method == 'linear':\n # finding H_l by linear decomposition\n cov = np.cov(X_min, rowvar=False)\n w, v = np.linalg.eig(cov)\n H_l = v[np.argmax(w)]\n else:\n # building a non-linear kernel matrix and finding H_n by its\n # decomposition\n self.sigma = float(self.method.split('_')[-1])\n kernel_matrix = pairwise_distances(X_min)\n kernel_matrix = kernel_matrix/(2.0*self.sigma**2)\n kernel_matrix = np.exp(kernel_matrix)\n try:\n w_k, v_k = np.linalg.eig(kernel_matrix)\n except Exception as e:\n return X.copy(), y.copy()\n H_n = v_k[np.argmax(w_k)]\n\n def kernel(x, y):\n return np.linalg.norm(x - y)/(2.0*self.sigma**2)\n\n # generating samples\n samples = []\n\n def angle(P, n, H_l):\n numerator = np.abs(np.dot(P[n], H_l))\n denominator = np.linalg.norm(P[n])*np.linalg.norm(H_l)\n return np.arccos(numerator/denominator)\n\n while len(samples) < n_to_sample:\n 
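            # Added illustrative note (not part of the original code): each
            # iteration below picks a random minority sample, computes the
            # angle between every neighbor-difference vector in P and the
            # principal direction (H_l in the linear case, H_n in feature
            # space for the non-linear kernel case), and interpolates towards
            # the neighbor whose difference is best aligned with that
            # direction.  A rough sketch of the linear-case selection,
            # assuming P already holds the difference vectors (hypothetical
            # names, kept commented out):
            #
            # cosines = np.abs(P.dot(H_l)) / (np.linalg.norm(P, axis=1)
            #                                 * np.linalg.norm(H_l))
            # best_neighbor = np.argmin(np.arccos(np.clip(cosines, 0.0, 1.0)))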
idx = self.random_state.randint(len(X_min))\n # calculating difference vectors from all neighbors\n P = X_min[ind[idx][1:]] - X_min[idx]\n if self.method == 'linear':\n # calculating angles with the principal direction\n thetas = np.array([angle(P, n, H_l) for n in range(len(P))])\n else:\n thetas = []\n # calculating angles of the difference vectors and the\n # principal direction in feature space\n for n in range(len(P)):\n # calculating representation in feature space\n feature_vector = np.array(\n [kernel(X_min[k], P[n]) for k in range(len(X_min))])\n dp = np.dot(H_n, feature_vector)\n denom = np.linalg.norm(feature_vector)*np.linalg.norm(H_n)\n thetas.append(np.arccos(np.abs(dp)/denom))\n thetas = np.array(thetas)\n\n # using the neighbor with the difference along the most similar\n # direction to the principal direction of the data\n n = np.argmin(thetas)\n X_a = X_min[idx]\n X_b = X_min[ind[idx][1:][n]]\n samples.append(self.sample_between_points_componentwise(X_a, X_b))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NT_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{nt_smote,\n author={Xu, Y. H. and Li, H. and Le, L. P. and\n Tian, X. Y.},\n booktitle={2014 Seventh International Joint\n Conference on Computational Sciences\n and Optimization},\n title={Neighborhood Triangular Synthetic Minority\n Over-sampling Technique for Imbalanced\n Prediction on Small Samples of Chinese\n Tourism and Hospitality Firms},\n year={2014},\n volume={},\n number={},\n pages={534-538},\n keywords={financial management;pattern\n classification;risk management;sampling\n methods;travel industry;Chinese\n tourism; hospitality firms;imbalanced\n risk prediction;minority class samples;\n up-sampling approach;neighborhood\n triangular synthetic minority\n over-sampling technique;NT-SMOTE;\n nearest neighbor idea;triangular area\n sampling idea;single classifiers;data\n excavation principles;hospitality\n industry;missing financial indicators;\n financial data filtering;financial risk\n prediction;MDA;DT;LSVM;logit;probit;\n firm risk prediction;Joints;\n Optimization;imbalanced datasets;\n NT-SMOTE;neighborhood triangular;\n random sampling},\n doi={10.1109/CSO.2014.104},\n ISSN={},\n month={July}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_application]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # find two nearest minority samples\n nn = NearestNeighbors(n_neighbors=3, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n while len(samples) < n_to_sample:\n # select point randomly\n idx = self.random_state.randint(len(X_min))\n P_1 = X_min[idx]\n # find two closest neighbors\n P_2 = X_min[ind[idx][1]]\n P_3 = X_min[ind[idx][2]]\n # generate random point by sampling the specified triangle\n r_1 = self.random_state.random_sample()\n r_2 = self.random_state.random_sample()\n samples.append((P_3 + r_1 * ((P_1 + r_2 * (P_2 - P_1)) - P_3)))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Lee(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{lee,\n author = {Lee, Jaedong and Kim,\n Noo-ri and Lee, Jee-Hyong},\n title = {An Over-sampling Technique with Rejection\n for Imbalanced Class Learning},\n booktitle = {Proceedings of the 9th International\n Conference on Ubiquitous\n Information Management and\n Communication},\n series = {IMCOM '15},\n year = {2015},\n isbn = {978-1-4503-3377-1},\n location = {Bali, Indonesia},\n pages = {102:1--102:6},\n articleno = {102},\n numpages = {6},\n doi = {10.1145/2701126.2701181},\n acmid = {2701181},\n publisher = {ACM},\n address = {New York, NY, USA},\n keywords = {data distribution, data preprocessing,\n imbalanced problem, rejection rule,\n synthetic minority oversampling\n technique}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n rejection_level=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling 
object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbor\n component\n rejection_level (float): the rejection level of generated samples,\n if the fraction of majority labels in\n the local environment is higher than\n this number, the generated point is\n rejected\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(rejection_level, \"rejection_level\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.rejection_level = rejection_level\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'rejection_level': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors models to find neighbors of minority\n # samples in the total data and in the minority datasets\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist_min, ind_min = nn_min.kneighbors(X_min)\n\n # do the sampling, we impleneted a continouos tweaking of rejection\n # levels in order to fix situations when no unrejectable data can\n # be can be generated\n samples = []\n passed = 0\n trial = 0\n rejection_level = self.rejection_level\n while len(samples) < n_to_sample:\n # checking if we managed to generate a single data in 1000 trials\n if passed == trial and passed > 1000:\n rejection_level = rejection_level + 0.1\n trial = 0\n passed = 0\n trial = trial + 1\n # generating random point\n idx = self.random_state.randint(len(X_min))\n random_neighbor_idx = self.random_state.choice(ind_min[idx][1:])\n X_a = X_min[idx]\n X_b = X_min[random_neighbor_idx]\n random_point = self.sample_between_points(X_a, X_b)\n # checking if the local environment is above the rejection level\n dist_new, 
ind_new = nn.kneighbors(random_point.reshape(1, -1))\n maj_frac = np.sum(y[ind_new][:-1] ==\n self.maj_label)/self.n_neighbors\n if maj_frac < rejection_level:\n samples.append(random_point)\n else:\n passed = passed + 1\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'rejection_level': self.rejection_level,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SPY(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{spy,\n author={Dang, X. T. and Tran, D. H. and Hirose, O.\n and Satou, K.},\n booktitle={2015 Seventh International Conference\n on Knowledge and Systems Engineering\n (KSE)},\n title={SPY: A Novel Resampling Method for\n Improving Classification Performance in\n Imbalanced Data},\n year={2015},\n volume={},\n number={},\n pages={280-285},\n keywords={decision making;learning (artificial\n intelligence);pattern classification;\n sampling methods;SPY;resampling\n method;decision-making process;\n biomedical data classification;\n class imbalance learning method;\n SMOTE;oversampling method;UCI\n machine learning repository;G-mean\n value;borderline-SMOTE;\n safe-level-SMOTE;Support vector\n machines;Training;Bioinformatics;\n Proteins;Protein engineering;Radio\n frequency;Sensitivity;Imbalanced\n dataset;Over-sampling;\n Under-sampling;SMOTE;\n borderline-SMOTE},\n doi={10.1109/KSE.2015.24},\n ISSN={},\n month={Oct}}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n n_neighbors=5,\n threshold=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors in nearest neighbor\n component\n threshold (float): threshold*n_neighbors gives the threshold z\n described in the paper\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(threshold, \"threshold\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.threshold = threshold\n self.n_jobs = n_jobs\n\n # random state takes no effect for this technique\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'threshold': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n y_new = y.copy()\n z = 
self.threshold*n_neighbors\n\n # checking the neighbors of each minority sample\n for i in range(len(X_min)):\n majority_mask = y[ind[i][1:]] == self.maj_label\n x = np.sum(majority_mask)\n # if the number of majority samples in the neighborhood is\n # smaller than a threshold\n # their labels are changed to minority\n if x < z:\n y_new[ind[i][1:][majority_mask]] = self.min_label\n\n return X.copy(), y_new\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'threshold': self.threshold,\n 'n_jobs': self.n_jobs}\n\n\nclass SMOTE_PSOBAT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{smote_psobat,\n author={Li, J. and Fong, S. and Zhuang, Y.},\n booktitle={2015 3rd International Symposium on\n Computational and Business\n Intelligence (ISCBI)},\n title={Optimizing SMOTE by Metaheuristics with\n Neural Network and Decision Tree},\n year={2015},\n volume={},\n number={},\n pages={26-32},\n keywords={data mining;particle swarm\n optimisation;pattern classification;\n data mining;classifier;metaherustics;\n SMOTE parameters;performance\n indicators;selection optimization;\n PSO;particle swarm optimization\n algorithm;BAT;bat-inspired algorithm;\n metaheuristic optimization algorithms;\n nearest neighbors;imbalanced dataset\n problem;synthetic minority\n over-sampling technique;decision tree;\n neural network;Classification\n algorithms;Neural networks;Decision\n trees;Training;Optimization;Particle\n swarm optimization;Data mining;SMOTE;\n Swarm Intelligence;parameter\n selection optimization},\n doi={10.1109/ISCBI.2015.12},\n ISSN={},\n month={Dec}}\n\n Notes:\n * The parameters of the memetic algorithms are not specified.\n * I have checked multiple paper describing the BAT algorithm, but the\n meaning of \"Generate a new solution by flying randomly\" is still\n unclear.\n * It is also unclear if best solutions are recorded for each bat, or\n the entire population.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_memetic]\n\n def __init__(self,\n maxit=50,\n c1=0.3,\n c2=0.1,\n c3=0.1,\n alpha=0.9,\n gamma=0.9,\n method='bat',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n maxit (int): maximum number of iterations\n c1 (float): intertia weight of PSO\n c2 (float): attraction of local maximums in PSO\n c3 (float): attraction of global maximum in PSO\n alpha (float): alpha parameter of the method\n gamma (float): gamma parameter of the method\n method (str): optimization technique to be used\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(maxit, \"maxit\", 1)\n self.check_greater_or_equal(c1, \"c1\", 0)\n self.check_greater_or_equal(c2, \"c2\", 0)\n self.check_greater_or_equal(c3, \"c3\", 0)\n self.check_greater_or_equal(alpha, \"alpha\", 0)\n self.check_greater_or_equal(gamma, \"gamma\", 0)\n self.check_isin(method, \"method\", ['pso', 'bat'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.maxit = maxit\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n self.alpha = alpha\n self.gamma = gamma\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter 
combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n bat_pc = cls.generate_parameter_combinations({'maxit': [50],\n 'alpha': [0.7, 0.9],\n 'gamma': [0.7, 0.9],\n 'method': ['bat']}, raw)\n pso_pc = cls.generate_parameter_combinations({'maxit': [50],\n 'c1': [0.2, 0.5],\n 'c2': [0.1, 0.2],\n 'c3': [0.1, 0.2],\n 'method': ['pso']}, raw)\n if not raw:\n bat_pc.extend(pso_pc)\n else:\n bat_pc = {**bat_pc, **pso_pc}\n return bat_pc\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n def evaluate(K, proportion):\n \"\"\"\n Evaluate given configuration\n\n Args:\n K (int): number of neighbors in nearest neighbors component\n proportion (float): proportion of missing data to generate\n\n Returns:\n float, float: kappa and accuracy scores\n \"\"\"\n smote = SMOTE(proportion=proportion,\n n_neighbors=K,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_samp, y_samp = smote.sample(X, y)\n\n # doing k-fold cross validation\n kfold = KFold(5)\n preds = []\n tests = []\n for train, test in kfold.split(X_samp):\n dt = DecisionTreeClassifier(random_state=self.random_state)\n dt.fit(X_samp[train], y_samp[train])\n preds.append(dt.predict(X_samp[test]))\n tests.append(y_samp[test])\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n # computing the kappa score\n tp = np.sum(np.logical_and(preds == tests,\n tests == self.min_label))\n fn = np.sum(np.logical_and(preds != tests,\n tests == self.min_label))\n tn = np.sum(np.logical_and(preds == tests,\n tests == self.maj_label))\n fp = np.sum(np.logical_and(preds != tests,\n tests == self.maj_label))\n\n p_o = (tp + tn)/(tp + fn + tn + fp)\n p_e = (tp + fn)*(tp + fp)/(tp + fn + tn + fp)**2 + \\\n (fp + tn)*(fn + tn)/(tp + fn + tn + fp)**2\n\n kappa = (p_o - p_e)/(1.0 - p_e)\n\n return kappa, p_o\n\n def PSO():\n \"\"\"\n PSO optimization\n\n Returns:\n int, float: the best K and proportion values\n \"\"\"\n # a reasonable range of nearest neighbors to use with SMOTE\n k_range = [2, min([np.sum(y == self.min_label), 10])]\n # a reasonable range of proportions\n proportion_range = [0.1, 2.0]\n # population size\n n_pop = 10\n\n # initial particles\n def init_particle():\n k_rand = self.random_state.randint(k_range[0], k_range[1])\n r = self.random_state.random_sample()\n diff = proportion_range[1] - proportion_range[0]\n vect = r*diff + proportion_range[0]\n return np.array([k_rand, vect])\n ps = [init_particle() for _ in range(n_pop)]\n # initial velocities\n velocities = [np.array([0, 0]) for _ in range(n_pop)]\n # best configurations of particles\n local_best = [ps[i].copy() for i in range(n_pop)]\n # scores of best configurations of particles\n local_scores = [(0, 0) for _ in range(n_pop)]\n # global best configuration of particles\n global_best = ps[0].copy()\n # global best score\n global_scores = (0, 0)\n\n # executing the particle swarm optimization\n not_changed = 0\n for _ in range(self.maxit):\n # if the configurations didn't change for 10 iterations, stop\n if not_changed > len(ps)*10:\n break\n # evaluating each of the configurations\n for i in range(len(ps)):\n 
scores = evaluate(np.int(ps[i][0]), ps[i][1])\n # recording if the best scores didn't change\n not_changed = not_changed + 1\n # registering locally and globally best scores\n if (min([local_scores[i][0], scores[0]]) > 0.4\n and local_scores[i][1] > scores[1]):\n local_scores[i] = scores\n local_best[i] = ps[i].copy()\n not_changed = 0\n elif scores[0] > 0.4 and local_scores[i][0] <= 0.4:\n local_scores[i] = scores\n local_best[i] = ps[i].copy()\n not_changed = 0\n\n if (min([global_scores[0], scores[0]]) > 0.4\n and global_scores[1] > scores[1]):\n global_scores = scores\n global_best = ps[i].copy()\n not_changed = 0\n elif scores[0] > 0.4 and global_scores[0] <= 0.4:\n global_scores = scores\n global_best = ps[i].copy()\n not_changed = 0\n\n # update velocities\n for i in range(len(ps)):\n velocities[i] = self.c1*velocities[i] + \\\n (local_best[i] - ps[i])*self.c2 + \\\n (global_best - ps[i])*self.c3\n # clipping velocities if required\n while abs(velocities[i][0]) > k_range[1] - k_range[0]:\n velocities[i][0] = velocities[i][0]/2.0\n diff = proportion_range[1] - proportion_range[0]\n while abs(velocities[i][1]) > diff:\n velocities[i][1] = velocities[i][1]/2.0\n\n # update positions\n for i in range(len(ps)):\n ps[i] = ps[i] + velocities[i]\n # clipping positions according to the specified ranges\n ps[i][0] = np.clip(ps[i][0], k_range[0], k_range[1])\n ps[i][1] = np.clip(ps[i][1],\n proportion_range[0],\n proportion_range[1])\n\n return global_best\n\n def BAT():\n \"\"\"\n BAT optimization\n\n Returns:\n int, float: the best K and proportion values\n \"\"\"\n\n if sum(y == self.min_label) < 2:\n return X.copy(), y.copy()\n\n # a reasonable range of nearest neighbors to use with SMOTE\n k_range = [1, min([np.sum(y == self.min_label), 10])]\n # a reasonable range of proportions\n proportion_range = [0.1, 2.0]\n # population size\n n_pop = 10\n # maximum frequency\n f_max = 10\n\n def init_bat():\n k_rand = self.random_state.randint(k_range[0], k_range[1])\n r = self.random_state.random_sample()\n diff = proportion_range[1] - proportion_range[0]\n return np.array([k_rand, r*diff + proportion_range[0]])\n\n # initial bat positions\n bats = [init_bat() for _ in range(n_pop)]\n # initial velocities\n velocities = [np.array([0, 0]) for _ in range(10)]\n # best configurations of particles\n local_best = [[[[0.0, 0.0], bats[i].copy()]]\n for i in range(len(bats))]\n # scores of best configurations of particles\n global_best = [[0.0, 0.0], bats[0].copy()]\n # pulse frequencies\n f = self.random_state.random_sample(size=n_pop)*f_max\n # pulse rates\n r = self.random_state.random_sample(size=n_pop)\n # loudness\n A = self.random_state.random_sample(size=n_pop)\n\n # gamma parameter according to the BAT paper\n gamma = self.gamma\n # alpha parameter according to the BAT paper\n alpha = self.alpha\n\n # initial best solution\n bat_star = bats[0].copy()\n\n not_changed = 0\n for t in range(self.maxit):\n not_changed = not_changed + 1\n\n if not_changed > 10:\n break\n\n # update frequencies\n f = self.random_state.random_sample(size=n_pop)*f_max\n\n # update velocities\n for i in range(len(velocities)):\n velocities[i] = velocities[i] + (bats[i] - bat_star)*f[i]\n\n # update bats\n for i in range(len(bats)):\n bats[i] = bats[i] + velocities[i]\n bats[i][0] = np.clip(bats[i][0], k_range[0], k_range[1])\n bats[i][1] = np.clip(\n bats[i][1], proportion_range[0], proportion_range[1])\n\n for i in range(n_pop):\n # generate local solution\n if self.random_state.random_sample() > r[i]:\n n_rand = 
min([len(local_best[i]), 5])\n rand_int = self.random_state.randint(n_rand)\n random_best_sol = local_best[i][rand_int][1]\n rr = self.random_state.random_sample(\n size=len(bat_star))\n bats[i] = random_best_sol + rr*A[i]\n\n # evaluate and do local search\n for i in range(n_pop):\n scores = evaluate(int(bats[i][0]), bats[i][1])\n\n # checking if the scores are better than the global score\n # implementation of the multi-objective criterion in the\n # SMOTE-PSOBAT paper\n improved_global = False\n if (min([global_best[0][0], scores[0]]) > 0.4\n and global_best[0][1] > scores[1]):\n improved_global = True\n not_changed = 0\n elif scores[0] > 0.4 and global_best[0][0] <= 0.4:\n improved_global = True\n not_changed = 0\n\n # checking if the scores are better than the local scores\n # implementation of the multi-objective criterion in the\n # SMOTE-PSOBAT paper\n improved_local = False\n if (min([local_best[i][0][0][0], scores[0]]) > 0.4\n and local_best[i][0][0][1] > scores[1]):\n improved_local = True\n elif scores[0] > 0.4 and local_best[i][0][0][0] <= 0.4:\n improved_local = True\n\n # local search in the bet algorithm\n if (self.random_state.random_sample() < A[i]\n and improved_local):\n local_best[i].append([scores, bats[i].copy()])\n A[i] = A[i]*alpha\n r[i] = r[i]*(1 - np.exp(-gamma*t))\n if (self.random_state.random_sample() < A[i]\n and improved_global):\n global_best = [scores, bats[i].copy()]\n\n # ranking local solutions to keep track of the best 5\n local_best[i] = sorted(\n local_best[i], key=lambda x: -x[0][0])\n local_best[i] = local_best[i][:min(\n [len(local_best[i]), 5])]\n\n t = t + 1\n\n return global_best[1]\n\n if self.method == 'pso':\n best_combination = PSO()\n elif self.method == 'bat':\n best_combination = BAT()\n else:\n message = \"Search method %s not supported yet.\" % self.method\n raise ValueError(self.__class__.__name__ + \": \" + message)\n\n return SMOTE(proportion=best_combination[1],\n n_neighbors=int(best_combination[0]),\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'maxit': self.maxit,\n 'c1': self.c1,\n 'c2': self.c2,\n 'c3': self.c3,\n 'alpha': self.alpha,\n 'gamma': self.gamma,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MDO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @ARTICLE{mdo,\n author={Abdi, L. 
and Hashemi, S.},\n journal={IEEE Transactions on Knowledge and Data\n Engineering},\n title={To Combat Multi-Class Imbalanced Problems\n by Means of Over-Sampling Techniques},\n year={2016},\n volume={28},\n number={1},\n pages={238-251},\n keywords={covariance analysis;learning (artificial\n intelligence);modelling;pattern\n classification;sampling methods;\n statistical distributions;minority\n class instance modelling;probability\n contour;covariance structure;MDO;\n Mahalanobis distance-based oversampling\n technique;data-oriented technique;\n model-oriented solution;machine learning\n algorithm;data skewness;multiclass\n imbalanced problem;Mathematical model;\n Training;Accuracy;Eigenvalues and\n eigenfunctions;Machine learning\n algorithms;Algorithm design and analysis;\n Benchmark testing;Multi-class imbalance\n problems;over-sampling techniques;\n Mahalanobis distance;Multi-class imbalance\n problems;over-sampling techniques;\n Mahalanobis distance},\n doi={10.1109/TKDE.2015.2458858},\n ISSN={1041-4347},\n month={Jan}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction]\n\n def __init__(self,\n proportion=1.0,\n K2=5,\n K1_frac=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n K2 (int): number of neighbors\n K1_frac (float): the fraction of K2 to set K1\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(K2, \"K2\", 1)\n self.check_greater_or_equal(K1_frac, \"K1_frac\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.K2 = K2\n self.K1_frac = K1_frac\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'K2': [3, 5, 7],\n 'K1_frac': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # determining K1\n self.K1 = int(self.K2*self.K1_frac)\n K1 = min([self.K1, len(X)])\n K2 = min([self.K2 + 1, len(X)])\n\n # Algorithm 2 - chooseSamples\n nn = NearestNeighbors(n_neighbors=K2, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the number of minority samples in local neighborhoods\n n_min = np.array([np.sum(y[ind[i][1:]] == 
self.min_label)\n for i in range(len(X_min))])\n\n # extracting selected samples from minority ones\n X_sel = X_min[n_min >= K1]\n\n # falling back to returning input data if all the input is considered\n # noise\n if len(X_sel) == 0:\n _logger.info(self.__class__.__name__ +\n \": \" + \"No samples selected\")\n return X.copy(), y.copy()\n\n # computing distribution\n weights = n_min[n_min >= K1]/K2\n weights = weights/np.sum(weights)\n\n # Algorithm 1 - MDO over-sampling\n mu = np.mean(X_sel, axis=0)\n Z = X_sel - mu\n # executing PCA\n pca = PCA(n_components=min([len(Z[0]), len(Z)])).fit(Z)\n T = pca.transform(Z)\n # computing variances (step 13)\n V = np.var(T, axis=0)\n\n V[V < 0.001] = 0.001\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n # selecting a sample randomly according to the distribution\n idx = self.random_state.choice(np.arange(len(X_sel)), p=weights)\n\n # finding vector in PCA space\n X_temp = T[idx]\n X_temp_square = X_temp**2\n\n # computing alphas\n alpha = np.sum(X_temp_square/V)\n alpha_V = alpha*V\n alpha_V[alpha_V < 0.001] = 0.001\n\n # initializing a new vector\n X_new = np.zeros(len(X_temp))\n\n # sampling components of the new vector\n s = 0\n for j in range(len(X_temp)-1):\n r = (2*self.random_state.random_sample()-1)*np.sqrt(alpha_V[j])\n X_new[j] = r\n s = s + (r**2/alpha_V[j])\n\n if s > 1:\n last_fea_val = 0\n else:\n tmp = (1 - s)*alpha*V[-1]\n if tmp < 0:\n tmp = 0\n last_fea_val = np.sqrt(tmp)\n # determine last component to fulfill the ellipse equation\n X_new[-1] = (2*self.random_state.random_sample()-1)*last_fea_val\n # append to new samples\n samples.append(X_new)\n\n return (np.vstack([X, pca.inverse_transform(samples) + mu]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'K2': self.K2,\n 'K1_frac': self.K1_frac,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Random_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{random_smote,\n author=\"Dong, Yanjie\n and Wang, Xuehua\",\n editor=\"Xiong, Hui\n and Lee, W. B.\",\n title=\"A New Over-Sampling Approach: Random-SMOTE\n for Learning from Imbalanced Data Sets\",\n booktitle=\"Knowledge Science, Engineering and\n Management\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"343--352\",\n isbn=\"978-3-642-25975-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find closest neighbors of minority\n # points\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)))\n y_1_idx, y_2_idx = self.random_state.choice(ind[idx][1:], 2)\n t = self.sample_between_points_componentwise(\n X_min[y_1_idx], X_min[y_2_idx])\n samples.append(\n self.sample_between_points_componentwise(X_min[idx], t))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ISMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{ismote,\n author=\"Li, Hu\n and Zou, Peng\n and Wang, Xiang\n and Xia, Rongze\",\n editor=\"Sun, Zengqi\n and Deng, Zhidong\",\n title=\"A New Combination Sampling Method for\n Imbalanced Data\",\n booktitle=\"Proceedings of 2013 Chinese Intelligent\n Automation Conference\",\n year=\"2013\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"547--554\",\n isbn=\"978-3-642-38466-0\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n n_neighbors=5,\n minority_weight=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors\n minority_weight 
(float): weight parameter according to the paper\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(minority_weight, \"minority_weight\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.minority_weight = minority_weight\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'minority_weight': [0.2, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_to_sample = int((len(X_maj) - len(X_min))/2 + 0.5)\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # computing distances of majority samples from minority ones\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_maj)\n\n # sort majority instances in descending order by their mean distance\n # from minority samples\n to_sort = zip(np.arange(len(X_maj)), np.mean(dist, axis=1))\n ind_sorted, dist_sorted = zip(*sorted(to_sort, key=lambda x: -x[1]))\n\n # remove the ones being farthest from the minority samples\n X_maj = X_maj[list(ind_sorted[n_to_sample:])]\n\n # construct new dataset\n X_new = np.vstack([X_maj, X_min])\n y_new = np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min))])\n\n X_min = X_new[y_new == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_new), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_new)\n dist, ind = nn.kneighbors(X_min)\n\n # do the oversampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)))\n y_idx = self.random_state.choice(ind[idx][1:])\n\n # different generation scheme depending on the class label\n if y_new[y_idx] == self.min_label:\n diff = (X_new[y_idx] - X_min[idx])\n r = self.random_state.random_sample()\n samples.append(X_min[idx] + r * diff * self.minority_weight)\n else:\n diff = (X_new[y_idx] - X_min[idx])\n r = self.random_state.random_sample()\n sample = X_min[idx] + r * diff * (1.0 - self.minority_weight)\n samples.append(sample)\n\n return (np.vstack([X_new, np.vstack(samples)]),\n np.hstack([y_new, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'minority_weight': self.minority_weight,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass VIS_RST(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n 
@InProceedings{vis_rst,\n author=\"Borowska, Katarzyna\n and Stepaniuk, Jaroslaw\",\n editor=\"Saeed, Khalid\n and Homenda, Wladyslaw\",\n title=\"Imbalanced Data Classification: A Novel\n Re-sampling Approach Combining Versatile\n Improved SMOTE and Rough Sets\",\n booktitle=\"Computer Information Systems and\n Industrial Management\",\n year=\"2016\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"31--42\",\n isbn=\"978-3-319-45378-1\"\n }\n\n Notes:\n * Replication of DANGER samples will be removed by the last step of\n noise filtering.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data\n ss = StandardScaler()\n ss.fit(X)\n X = ss.transform(X)\n y = y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model to determine boundary region\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_maj)\n\n # determining boundary region of majority samples\n boundary = np.array([np.sum(y[ind[i]] == self.maj_label)\n != n_neighbors for i in range(len(X_maj))])\n y_maj = y[y == self.maj_label]\n y_maj[boundary] = self.min_label\n y[y == self.maj_label] = y_maj\n\n # extracting new minority and majority set\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # labeling minority samples\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting labels\n labels = []\n for i in range(len(ind)):\n 
min_class_neighbors = np.sum(y[ind[i][1:]] == self.maj_label)\n if min_class_neighbors == n_neighbors-1:\n labels.append('noise')\n elif min_class_neighbors < n_neighbors/2:\n labels.append('safe')\n else:\n labels.append('danger')\n\n # extracting the number of different labels (noise is not used)\n safe = np.sum([li == 'safe' for li in labels])\n danger = np.sum([li == 'danger' for li in labels])\n\n if safe == 0:\n mode = 'no_safe'\n elif danger > 0.3*len(X_min):\n mode = 'high_complexity'\n else:\n mode = 'low_complexity'\n\n # fitting nearest neighbors to find the neighbors of minority elements\n # among minority elements\n n_neighbors_min = min([len(X_min), self.n_neighbors + 1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors_min,\n n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist_min, ind_min = nn_min.kneighbors(X_min)\n\n # do the sampling\n samples = []\n mask = np.repeat(False, len(X_min))\n while len(samples) < n_to_sample:\n # choosing a random minority sample\n idx = self.random_state.choice(np.arange(len(X_min)))\n\n # implementation of sampling rules depending on the mode\n if mode == 'high_complexity':\n if labels[idx] == 'noise':\n pass\n elif labels[idx] == 'danger' and not mask[idx]:\n samples.append(X_min[idx])\n mask[idx] = True\n else:\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n elif mode == 'low_complexity':\n if labels[idx] == 'noise':\n pass\n elif labels[idx] == 'danger':\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n elif not mask[idx]:\n samples.append(X_min[idx])\n mask[idx] = True\n else:\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.add(self.sample_between_points(X_min[idx], X_b))\n\n X_samp = np.vstack(samples)\n\n # final noise removal by removing those minority samples generated\n # and not belonging to the lower approximation\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X)\n dist_check, ind_check = nn.kneighbors(X_samp)\n\n def maj_zero(i):\n return np.sum(y[ind_check[i][1:]] == self.maj_label) == 0\n\n num_maj_mask = np.array([maj_zero(i) for i in range(len(samples))])\n X_samp = X_samp[num_maj_mask]\n\n return (ss.inverse_transform(np.vstack([X, X_samp])),\n np.hstack([y, np.repeat(self.min_label, len(X_samp))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass GASMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{gasmote,\n author=\"Jiang, Kun\n and Lu, Jing\n and Xia, Kuiliang\",\n title=\"A Novel Algorithm for Imbalance Data\n Classification Based on Genetic\n Algorithm Improved SMOTE\",\n journal=\"Arabian Journal for Science and\n Engineering\",\n year=\"2016\",\n month=\"Aug\",\n day=\"01\",\n volume=\"41\",\n number=\"8\",\n pages=\"3255--3266\",\n issn=\"2191-4281\",\n doi=\"10.1007/s13369-016-2179-2\",\n url=\"https://doi.org/10.1007/s13369-016-2179-2\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_memetic,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n n_neighbors=5,\n maxn=7,\n n_pop=10,\n popl3=5,\n pm=0.3,\n pr=0.2,\n Ge=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of 
neighbors\n maxn (int): maximum number of samples to generate per minority\n instances\n n_pop (int): size of population\n popl3 (int): number of crossovers\n pm (float): mutation probability\n pr (float): selection probability\n Ge (int): number of generations\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(maxn, \"maxn\", 1)\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_in_range(pm, \"pm\", [0, 1])\n self.check_in_range(pr, \"pr\", [0, 1])\n self.check_greater_or_equal(Ge, \"Ge\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.maxn = maxn\n self.n_pop = n_pop\n self.popl3 = popl3\n self.pm = pm\n self.pr = pr\n self.Ge = Ge\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({'n_neighbors': [7],\n 'maxn': [2, 3, 4],\n 'n_pop': [10],\n 'popl3': [4],\n 'pm': [0.3],\n 'pr': [0.2],\n 'Ge': [10]}, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find minority neighbors of\n # minority samples\n n_neighbors = min([self.n_neighbors + 1, len(X_min)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n kfold = KFold(min([len(X), 5]))\n\n def fitness(conf):\n \"\"\"\n Evluate fitness of configuration\n\n Args:\n conf (list(list)): configuration\n \"\"\"\n # generate new samples\n samples = []\n for i in range(len(conf)):\n for _ in range(conf[i]):\n X_b = X_min[self.random_state.choice(ind[i][1:])]\n samples.append(self.sample_between_points(X_min[i], X_b))\n\n if len(samples) == 0:\n # if no samples are generated\n X_new = X\n y_new = y\n else:\n # construct dataset\n X_new = np.vstack([X, np.vstack(samples)])\n y_new = np.hstack(\n [y, np.repeat(self.min_label, len(samples))])\n\n # execute kfold cross validation\n preds, tests = [], []\n for train, test in kfold.split(X_new):\n dt = DecisionTreeClassifier(random_state=self.random_state)\n dt.fit(X_new[train], y_new[train])\n preds.append(dt.predict(X_new[test]))\n tests.append(y_new[test])\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n\n # compute fitness measure\n tp = np.sum(np.logical_and(\n tests == self.min_label, tests == preds))\n tn = np.sum(np.logical_and(\n tests == self.maj_label, tests == preds))\n fp = np.sum(np.logical_and(\n tests == self.maj_label, tests != preds))\n fn = np.sum(np.logical_and(\n tests == self.min_label, tests != preds))\n sens = tp/(tp + fn)\n spec = tn/(fp + tn)\n\n return np.sqrt(sens*spec)\n\n def crossover(conf_a, conf_b):\n \"\"\"\n Crossover\n\n Args:\n conf_a (list(list)): configuration to crossover\n conf_b 
(list(list)): configuration to crossover\n\n Returns:\n list(list), list(list): the configurations after crossover\n \"\"\"\n for _ in range(self.popl3):\n k = self.random_state.randint(len(conf_a))\n conf_a = np.hstack([conf_a[:k], conf_b[k:]])\n conf_b = np.hstack([conf_b[:k], conf_a[k:]])\n return conf_a, conf_b\n\n def mutation(conf, ge):\n \"\"\"\n Mutation\n\n Args:\n conf (list(list)): configuration to mutate\n ge (int): iteration number\n \"\"\"\n conf = conf.copy()\n if self.random_state.random_sample() < self.pm:\n pass\n else:\n for i in range(len(conf)):\n r = self.random_state.random_sample()\n r = r**((1 - ge/self.Ge)**3)\n if self.random_state.randint(2) == 0:\n conf[i] = int(conf[i] + (self.maxn - conf[i])*r)\n else:\n conf[i] = int(conf[i] - (conf[i] - 0)*r)\n return conf\n\n # generate initial population\n def init_pop():\n return self.random_state.randint(self.maxn, size=len(X_min))\n\n population = [[init_pop(), 0] for _ in range(self.n_pop)]\n\n # calculate fitness values\n for p in population:\n p[1] = fitness(p[0])\n\n # start iteration\n ge = 0\n while ge < self.Ge:\n # sorting population in descending order by fitness scores\n population = sorted(population, key=lambda x: -x[1])\n\n # selection operation (Step 2)\n pp = int(self.n_pop*self.pr)\n population_new = []\n for i in range(pp):\n population_new.append(population[i])\n population_new.extend(population[:(self.n_pop - pp)])\n population = population_new\n\n # crossover\n for _ in range(int(self.n_pop/2)):\n pop_0 = population[self.random_state.randint(self.n_pop)][0]\n pop_1 = population[self.random_state.randint(self.n_pop)][0]\n conf_a, conf_b = crossover(pop_0, pop_1)\n population.append([conf_a, fitness(conf_a)])\n population.append([conf_b, fitness(conf_b)])\n\n # mutation\n for _ in range(int(self.n_pop/2)):\n pop_0 = population[self.random_state.randint(self.n_pop)][0]\n conf = mutation(pop_0, ge)\n population.append([conf, fitness(conf)])\n\n ge = ge + 1\n\n # sorting final population\n population = sorted(population, key=lambda x: -x[1])\n\n # get best configuration\n conf = population[0][0]\n\n # generate final samples\n samples = []\n for i in range(len(conf)):\n for _ in range(conf[i]):\n samples.append(self.sample_between_points(\n X_min[i], X_min[self.random_state.choice(ind[i][1:])]))\n\n if len(samples) == 0:\n return X.copy(), y.copy()\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'maxn': self.maxn,\n 'n_pop': self.n_pop,\n 'popl3': self.popl3,\n 'pm': self.pm,\n 'pr': self.pr,\n 'Ge': self.Ge,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass A_SUWO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{a_suwo,\n title = \"Adaptive semi-unsupervised weighted\n oversampling (A-SUWO) for imbalanced\n datasets\",\n journal = \"Expert Systems with Applications\",\n volume = \"46\",\n pages = \"405 - 416\",\n year = \"2016\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2015.10.031\",\n author = \"Iman Nekooeimehr and Susana K. 
Lai-Yuen\",\n keywords = \"Imbalanced dataset, Classification,\n Clustering, Oversampling\"\n }\n\n Notes:\n * Equation (7) misses a division by R_j.\n * It is not specified how to sample from clusters with 1 instances.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clus_maj=7,\n c_thres=0.8,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_clus_maj (int): number of majority clusters\n c_thres (float): threshold on distances\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clus_maj, \"n_clus_maj\", 1)\n self.check_greater_or_equal(c_thres, \"c_thres\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clus_maj = n_clus_maj\n self.c_thres = c_thres\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clus_maj': [5, 7, 9],\n 'c_thres': [0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_orig, y_orig = X, y\n\n # fitting nearest neighbors to find neighbors of all samples\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n # identifying as noise those samples which do not have neighbors of\n # the same label\n def noise_func(i):\n return np.sum(y[ind[i][1:]] == y[i]) == 0\n noise = np.where(np.array([noise_func(i) for i in range(len(X))]))[0]\n\n # removing noise\n X = np.delete(X, noise, axis=0)\n y = np.delete(y, noise)\n\n # extarcting modified minority and majority datasets\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n if len(X_min) == 0:\n _logger.info(\"All minority samples removed as noise\")\n return X_orig.copy(), y_orig.copy()\n\n n_clus_maj = min([len(X_maj), self.n_clus_maj])\n\n # clustering majority samples\n ac = 
AgglomerativeClustering(n_clusters=n_clus_maj)\n ac.fit(X_maj)\n maj_clusters = [np.where(ac.labels_ == i)[0]\n for i in range(n_clus_maj)]\n\n if len(maj_clusters) == 0:\n return X_orig.copy(), y_orig.copy()\n\n # initialize minority clusters\n min_clusters = [np.array([i]) for i in range(len(X_min))]\n\n # compute minority distance matrix of cluster\n dm_min = pairwise_distances(X_min)\n for i in range(len(dm_min)):\n dm_min[i, i] = np.inf\n\n # compute distance matrix of minority and majority clusters\n dm_maj = np.zeros(shape=(len(X_min), len(maj_clusters)))\n for i in range(len(X_min)):\n for j in range(len(maj_clusters)):\n pairwd = pairwise_distances(X_min[min_clusters[i]],\n X_maj[maj_clusters[j]])\n dm_maj[i, j] = np.min(pairwd)\n\n # compute threshold\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n d_med = np.median(dist, axis=1)\n T = np.mean(d_med)*self.c_thres\n\n # do the clustering of minority samples\n while True:\n # finding minimum distance between minority clusters\n pi = np.min(dm_min)\n\n # if the minimum distance is higher than the threshold, stop\n if pi > T:\n break\n\n # find cluster pair of minimum distance\n min_dist_pair = np.where(dm_min == pi)\n min_i = min_dist_pair[0][0]\n min_j = min_dist_pair[1][0]\n\n # Step 3 - find majority clusters closer than pi\n A = np.where(np.logical_and(dm_maj[min_i] < pi,\n dm_maj[min_j] < pi))[0]\n\n # Step 4 - checking if there is a majority cluster between the\n # minority ones\n if len(A) > 0:\n dm_min[min_i, min_j] = np.inf\n dm_min[min_j, min_i] = np.inf\n else:\n # Step 5\n # unifying minority clusters\n min_clusters[min_i] = np.hstack([min_clusters[min_i],\n min_clusters[min_j]])\n # removing one of them\n min_clusters = np.delete(min_clusters, min_j)\n\n # updating the minority distance matrix\n dm_min[min_i] = np.min(np.vstack([dm_min[min_i],\n dm_min[min_j]]), axis=0)\n dm_min[:, min_i] = dm_min[min_i]\n # removing jth row and column (merged in i)\n dm_min = np.delete(dm_min, min_j, axis=0)\n dm_min = np.delete(dm_min, min_j, axis=1)\n\n # fixing the diagonal elements\n for i in range(len(dm_min)):\n dm_min[i, i] = np.inf\n\n # updating the minority-majority distance matrix\n dm_maj[min_i] = np.min(np.vstack([dm_maj[min_i],\n dm_maj[min_j]]), axis=0)\n dm_maj = np.delete(dm_maj, min_j, axis=0)\n\n # adaptive sub-cluster sizing\n eps = []\n # going through all minority clusters\n for c in min_clusters:\n # checking if cluster size is higher than 1\n if len(c) > 1:\n k = min([len(c), 5])\n kfold = KFold(k, random_state=self.random_state)\n preds = []\n # executing k-fold cross validation with linear discriminant\n # analysis\n X_c = X_min[c]\n for train, test in kfold.split(X_c):\n X_train = np.vstack([X_maj, X_c[train]])\n y_train_maj = np.repeat(self.maj_label, len(X_maj))\n y_train_min = np.repeat(self.min_label, len(X_c[train]))\n y_train = np.hstack([y_train_maj, y_train_min])\n ld = LinearDiscriminantAnalysis()\n ld.fit(X_train, y_train)\n preds.append(ld.predict(X_c[test]))\n preds = np.hstack(preds)\n # extracting error rate\n eps.append(np.sum(preds == self.maj_label)/len(preds))\n else:\n eps.append(1.0)\n\n # sampling distribution over clusters\n min_cluster_dist = eps/np.sum(eps)\n\n # synthetic instance generation - determining within cluster\n # distribution finding majority neighbor distances of minority\n # samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n dist = 
dist/len(X[0])\n dist = 1.0/dist\n\n # computing the THs\n THs = []\n for c in min_clusters:\n THs.append(np.mean(dist[c, 0]))\n\n # determining within cluster distributions\n within_cluster_dist = []\n for i, c in enumerate(min_clusters):\n Gamma = dist[c, 0]\n Gamma[Gamma > THs[i]] = THs[i]\n within_cluster_dist.append(Gamma/np.sum(Gamma))\n\n # extracting within cluster neighbors\n within_cluster_neighbors = []\n for c in min_clusters:\n n_neighbors = min([len(c), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min[c])\n within_cluster_neighbors.append(nn.kneighbors(X_min[c])[1])\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random cluster index\n cluster_idx = self.random_state.choice(\n np.arange(len(min_clusters)), p=min_cluster_dist)\n if len(min_clusters[cluster_idx]) > 1:\n # if the cluster has at least two elemenets\n domain = np.arange(len(min_clusters[cluster_idx]))\n distribution = within_cluster_dist[cluster_idx]\n sample_idx = self.random_state.choice(domain, p=distribution)\n\n domain = within_cluster_neighbors[cluster_idx][sample_idx][1:]\n neighbor_idx = self.random_state.choice(domain)\n point = X_min[min_clusters[cluster_idx][sample_idx]]\n neighbor = X_min[min_clusters[cluster_idx][neighbor_idx]]\n samples.append(self.sample_between_points(point, neighbor))\n else:\n samples.append(X_min[min_clusters[cluster_idx][0]])\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clus_maj': self.n_clus_maj,\n 'c_thres': self.c_thres,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_FRST_2T(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_frst_2t,\n title = \"Fuzzy-rough imbalanced learning for the\n diagnosis of High Voltage Circuit\n Breaker maintenance: The SMOTE-FRST-2T\n algorithm\",\n journal = \"Engineering Applications of Artificial\n Intelligence\",\n volume = \"48\",\n pages = \"134 - 139\",\n year = \"2016\",\n issn = \"0952-1976\",\n doi = \"https://doi.org/10.1016/j.engappai.2015.10.009\",\n author = \"Ramentol, E. and Gondres, I. and Lajes, S.\n and Bello, R. and Caballero,Y. and\n Cornelis, C. and Herrera, F.\",\n keywords = \"High Voltage Circuit Breaker (HVCB),\n Imbalanced learning, Fuzzy rough set\n theory, Resampling methods\"\n }\n\n Notes:\n * Unlucky setting of parameters might result 0 points added, we have\n fixed this by increasing the gamma_S threshold if the number of\n samples accepted is low.\n * Similarly, unlucky setting of parameters might result all majority\n samples turned into minority.\n * In my opinion, in the algorithm presented in the paper the\n relations are incorrect. 
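          (Illustrative aside, not part of the cited paper: after the
          MinMaxScaler step in the sample() method below, the positive
          region membership is computed from L1 distances; assuming
          X_min and X_maj are the scaled minority/majority arrays and
          d is the number of features, it can be sketched as

              pos = 1.0 - (1.0 - pairwise_distances(X_min, X_maj,
                                                     metric='l1')).clip(0, d)

          using sklearn's pairwise_distances, which mirrors the
          pos_cache computation in the code.)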
The authors talk about accepting samples\n having POS score below a threshold, and in the algorithm in\n both places POS >= gamma is used.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_application]\n\n def __init__(self,\n n_neighbors=5,\n gamma_S=0.7,\n gamma_M=0.03,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors in the SMOTE sampling\n gamma_S (float): threshold of synthesized samples\n gamma_M (float): threshold of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(gamma_S, \"gamma_S\", 0)\n self.check_greater_or_equal(gamma_M, \"gamma_M\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.gamma_S = gamma_S\n self.gamma_M = gamma_M\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'gamma_S': [0.8, 1.0],\n 'gamma_M': [0.03, 0.05, 0.1]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Turning the ranges to 1 speeds up the positive membership\n # calculations\n mmscaler = MinMaxScaler()\n X = mmscaler.fit_transform(X)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # extracting the attribute ranges\n\n d = len(X[0])\n\n # after MinMax scaling, the POS value can be calculated as follows\n pos_cache = pairwise_distances(X_min, X_maj, metric='l1')\n pos_cache = 1.0 - pos_cache\n pos_cache = pos_cache.clip(0, d)\n pos_cache = 1.0 - pos_cache\n\n # initializing some lists containing the results\n result_synth = []\n result_maj = []\n iteration = 0\n\n gamma_S = self.gamma_S\n gamma_M = self.gamma_M\n\n # iterating until the dataset becomes balanced\n while (len(X_min) + len(result_synth) + len(result_maj)) < len(X_maj):\n _logger.info(self.__class__.__name__ + \":\" +\n (\"iteration: %d\" % iteration))\n # checking if the parameters aren't too conservative\n if len(result_synth) < iteration:\n gamma_S = gamma_S*1.1\n _logger.info(self.__class__.__name__ + \": \" +\n \"gamma_S increased to %f\" % gamma_S)\n\n # determine proportion\n diff = (sum(y == self.maj_label) -\n sum(y == self.min_label))\n prop = max(1.1/diff, 0.2)\n\n # executing SMOTE to generate some minority samples\n smote = SMOTE(proportion=prop,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_samp, y_samp = smote.sample(X, y)\n X_samp = X_samp[len(X):]\n\n new_synth = []\n\n # computing POS membership values for the new samples\n pos_synth = 
pairwise_distances(X_min, X_samp, metric='l1')\n pos_synth = 1.0 - pos_synth\n pos_synth = pos_synth.clip(0, d)\n pos_synth = 1.0 - pos_synth\n\n # adding samples with POS membership smaller than gamma_S to the\n # minority set\n min_pos = np.min(pos_synth, axis=0)\n to_add = np.where(min_pos < gamma_S)[0]\n result_synth.extend(X_samp[to_add])\n new_synth.extend(X_samp[to_add])\n\n # checking the minimum POS values of the majority samples\n min_pos = np.min(pos_cache, axis=0)\n to_remove = np.where(min_pos < self.gamma_M)[0]\n\n # if the number of majority samples with POS membership smaller\n # than gamma_M is not extreme, then changing labels, otherwise\n # decreasing gamma_M\n if len(to_remove) > (len(X_maj) - len(X_min))/2:\n to_remove = np.array([])\n gamma_M = gamma_M*0.9\n _logger.info(self.__class__.__name__ + \": \" +\n \"gamma_M decreased to %f\" % gamma_M)\n else:\n result_maj.extend(X_maj[to_remove])\n X_maj = np.delete(X_maj, to_remove, axis=0)\n pos_cache = np.delete(pos_cache, to_remove, axis=1)\n\n # updating pos cache\n if len(new_synth) > 0:\n pos_cache_new = pairwise_distances(\n np.vstack(new_synth), X_maj, metric='l1')\n pos_cache_new = 1.0 - pos_cache_new\n pos_cache_new = pos_cache_new.clip(0, d)\n pos_cache_new = 1.0 - pos_cache_new\n\n pos_cache = np.vstack([pos_cache, pos_cache_new])\n\n message = \"minority added: %d, majority removed %d\"\n message = message % (len(to_add), len(to_remove))\n _logger.info(self.__class__.__name__ + \":\" + message)\n\n iteration = iteration + 1\n\n # packing the results\n X_res = np.vstack([X_maj, X_min])\n if len(result_synth) > 0:\n X_res = np.vstack([X_res, np.vstack(result_synth)])\n if len(result_maj) > 0:\n X_res = np.vstack([X_res, np.vstack(result_maj)])\n\n if len(X_maj) == 0:\n _logger.warning('All majority samples removed')\n return mmscaler.inverse_transform(X), y\n\n y_res_maj = np.repeat(self.maj_label, len(X_maj))\n n_y_res_min = len(X_min) + len(result_synth) + len(result_maj)\n y_res_min = np.repeat(self.min_label, n_y_res_min)\n y_res = np.hstack([y_res_maj, y_res_min])\n\n return mmscaler.inverse_transform(X_res), y_res\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'gamma_S': self.gamma_S,\n 'gamma_M': self.gamma_M,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AND_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{and_smote,\n author = {Yun, Jaesub and Ha,\n Jihyun and Lee, Jong-Seok},\n title = {Automatic Determination of Neighborhood\n Size in SMOTE},\n booktitle = {Proceedings of the 10th International\n Conference on Ubiquitous\n Information Management and\n Communication},\n series = {IMCOM '16},\n year = {2016},\n isbn = {978-1-4503-4142-4},\n location = {Danang, Viet Nam},\n pages = {100:1--100:8},\n articleno = {100},\n numpages = {8},\n doi = {10.1145/2857546.2857648},\n acmid = {2857648},\n publisher = {ACM},\n address = {New York, NY, USA},\n keywords = {SMOTE, imbalanced learning, synthetic\n data generation},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self, proportion=1.0, K=15, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n K (int): maximum number of nearest neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(K, \"K\", 2)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.K = K\n self.n_jobs = n_jobs\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'K': [9, 15, 21]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n K = min([len(X_min), self.K])\n # find K nearest neighbors of all samples\n nn = NearestNeighbors(n_neighbors=K, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n min_ind = np.where(y == self.min_label)[0]\n\n # Executing the algorithm\n kappa = []\n for i in range(len(min_ind)):\n regions_min = []\n regions_maj = []\n\n for j in range(1, K):\n # continueing if the label of the neighbors is minority\n if y[ind[min_ind[i]][j]] != self.min_label:\n continue\n\n # region coordinates\n reg = np.hstack([min_ind[i], ind[min_ind[i]][j]])\n # compute corner points\n reg_min = np.min(X[reg])\n reg_max = np.max(X[reg])\n\n r_min = []\n r_maj = []\n # all the points in the region must be among the neighbors\n # what we do is counting how many of them are minority and\n # majority samples\n for k in ind[min_ind[i]][:(j+1)]:\n if np.all(reg_min <= X[k]) and np.all(X[k] <= reg_max):\n if y[k] == self.min_label:\n r_min.append(k)\n else:\n r_maj.append(k)\n\n # appending the coordinates of points to the minority and\n # majority regions\n regions_min.append(r_min)\n regions_maj.append(r_maj)\n\n # taking the cumulative unions of minority and majority points\n for j in range(1, len(regions_min)):\n regions_min[j] = list(\n set(regions_min[j]).union(set(regions_min[j-1])))\n regions_maj[j] = list(\n set(regions_maj[j]).union(set(regions_maj[j-1])))\n\n # computing the lengths of the increasing minority and majority\n # sets\n regions_min = np.array([len(r) for r in regions_min])\n regions_maj = np.array([len(r) for r in regions_maj])\n\n # computing the precision of minority classification (all points\n # are supposed to be classified as minority)\n prec = regions_min/(regions_min + regions_maj)\n # taking the difference\n d = np.diff(prec, 1)\n # finding the biggest drop (+1 because diff reduces length, +1\n # because of indexing begins with 0)\n if len(d) == 0:\n k = 0\n 
else:\n k = np.argmin(d) + 2\n # appending the coordinate of the biggest drop as the ideal\n # neighborhood size note that k indices the minority neighbors\n kappa.append(k)\n\n # finding nearest minority neighbors of minority samples\n nn = NearestNeighbors(n_neighbors=max(kappa) + 1, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if np.sum(kappa) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"No minority samples in nearest neighbors\")\n return X.copy(), y.copy()\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random point\n idx = self.random_state.randint(len(X_min))\n if kappa[idx] > 0:\n domain = ind[idx][1:(kappa[idx]+1)]\n X_b = X_min[self.random_state.choice(domain)]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'K': self.K,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NRAS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{nras,\n title = \"Noise Reduction A Priori Synthetic\n Over-Sampling for class imbalanced data\n sets\",\n journal = \"Information Sciences\",\n volume = \"408\",\n pages = \"146 - 161\",\n year = \"2017\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2017.04.046\",\n author = \"William A. Rivera\",\n keywords = \"NRAS, SMOTE, OUPS, Class imbalance,\n Classification\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n t=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors\n t (float): [0,1] fraction of n_neighbors as threshold\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(t, \"t\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.t = t\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [5, 7, 9],\n 't': [0.3, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardization is needed to make the range of the propensity scores\n # similar to that of the features\n mms = MinMaxScaler()\n X_trans = mms.fit_transform(X)\n\n # determining propensity scores using logistic regression\n lr = LogisticRegression(solver='lbfgs',\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n lr.fit(X_trans, y)\n propensity = lr.predict_proba(X_trans)[:, np.where(\n lr.classes_ == self.min_label)[0][0]]\n\n X_min = X_trans[y == self.min_label]\n\n # adding propensity scores as a new feature\n X_new = np.column_stack([X_trans, propensity])\n X_min_new = X_new[y == self.min_label]\n\n # finding nearest neighbors of minority samples\n n_neighbors = min([len(X_new), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_new)\n dist, ind = nn.kneighbors(X_min_new)\n\n # do the sampling\n samples = []\n to_remove = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n # finding the number of minority neighbors\n t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\n if t_hat < self.t*n_neighbors:\n # removing the minority point if the number of minority\n # neighbors is less then the threshold\n # to_remove indexes X_min\n if idx not in to_remove:\n to_remove.append(idx)\n # compensating the removal of the minority point\n n_to_sample = n_to_sample + 1\n\n if len(to_remove) == len(X_min):\n _logger.warning(self.__class__.__name__ + \": \" +\n \"all minority samples identified as noise\")\n return X.copy(), y.copy()\n else:\n # otherwise do the sampling\n X_b = X_trans[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n\n # remove noisy elements\n X_maj = X_trans[y == self.maj_label]\n 
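# --- Illustrative aside (standalone sketch of the propensity-score step) ---
# NRAS appends the estimated minority-class propensity as one extra feature
# before the nearest-neighbor search.  A minimal, hedged reproduction of that
# step on synthetic data (all variable names below are hypothetical):
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(42)
X_demo = np.vstack([rng.normal(0.0, 1.0, size=(90, 2)),
                    rng.normal(2.0, 1.0, size=(10, 2))])
y_demo = np.hstack([np.repeat(0, 90), np.repeat(1, 10)])  # 1 = minority label

X_scaled = MinMaxScaler().fit_transform(X_demo)
lr_demo = LogisticRegression(solver='lbfgs').fit(X_scaled, y_demo)
min_col = np.where(lr_demo.classes_ == 1)[0][0]
propensity_demo = lr_demo.predict_proba(X_scaled)[:, min_col]

# the propensity score becomes one more coordinate of every sample
X_aug = np.column_stack([X_scaled, propensity_demo])
nn_demo = NearestNeighbors(n_neighbors=6).fit(X_aug)
dist_demo, ind_demo = nn_demo.kneighbors(X_aug[y_demo == 1])
# --- end of aside ---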
X_min = np.delete(X_min, to_remove, axis=0)\n\n return (mms.inverse_transform(np.vstack([X_maj,\n X_min,\n np.vstack(samples)])),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min)),\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 't': self.t,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AMSCO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{amsco,\n title = \"Adaptive multi-objective swarm fusion for\n imbalanced data classification\",\n journal = \"Information Fusion\",\n volume = \"39\",\n pages = \"1 - 24\",\n year = \"2018\",\n issn = \"1566-2535\",\n doi = \"https://doi.org/10.1016/j.inffus.2017.03.007\",\n author = \"Jinyan Li and Simon Fong and Raymond K.\n Wong and Victor W. Chu\",\n keywords = \"Swarm fusion, Swarm intelligence\n algorithm, Multi-objective, Crossover\n rebalancing, Imbalanced data\n classification\"\n }\n\n Notes:\n * It is not clear how the kappa threshold is used, I do use the RA\n score to drive all the evolution. Particularly:\n\n \"In the last phase of each iteration, the average Kappa value\n in current non-inferior set is compare with the latest threshold\n value, the threshold is then increase further if the average value\n increases, and vice versa. By doing so, the non-inferior region\n will be progressively reduced as the Kappa threshold lifts up.\"\n\n I don't see why would the Kappa threshold lift up if the kappa\n thresholds are decreased if the average Kappa decreases (\"vice versa\").\n\n * Due to the interpretation of kappa threshold and the lack of detailed\n description of the SIS process, the implementation is not exactly\n what is described in the paper, but something very similar.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n n_pop=5,\n n_iter=15,\n omega=0.1,\n r1=0.1,\n r2=0.1,\n n_jobs=1,\n classifier=DecisionTreeClassifier(random_state=2),\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_pop (int): size of populations\n n_iter (int): optimization steps\n omega (float): intertia of PSO\n r1 (float): force towards local optimum\n r2 (float): force towards global optimum\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_greater_or_equal(n_iter, \"n_iter\", 1)\n self.check_greater_or_equal(omega, \"omega\", 0)\n self.check_greater_or_equal(r1, \"r1\", 0)\n self.check_greater_or_equal(r2, \"r2\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_pop = n_pop\n self.n_iter = n_iter\n self.omega = omega\n self.r1 = r1\n self.r2 = r2\n self.n_jobs = n_jobs\n self.classifier = classifier\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the method is an overall optimization, 1 reasonable settings\n # should be enough\n\n classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'n_pop': [5],\n 'n_iter': [15],\n 'omega': [0.1],\n 'r1': [0.1],\n 'r2': [0.1],\n 'classifier': classifiers}\n\n return 
cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_cross_val = min([4, len(X_min)])\n\n def fitness(X_min, X_maj):\n \"\"\"\n Calculating fitness function\n\n Args:\n X_min (np.matrix): minority samples\n X_maj (np.matrix): majority samples\n\n Returns:\n float, float: kappa, accuracy\n \"\"\"\n kfold = StratifiedKFold(n_cross_val)\n\n # prepare assembled dataset\n X_ass = np.vstack([X_min, X_maj])\n y_ass = np.hstack([np.repeat(self.min_label, len(X_min)),\n np.repeat(self.maj_label, len(X_maj))])\n\n preds = []\n tests = []\n for train, test in kfold.split(X_ass, y_ass):\n self.classifier.fit(X_ass[train], y_ass[train])\n preds.append(self.classifier.predict(X))\n tests.append(y)\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n\n # calculate kappa and accuracy scores\n tp = np.sum(np.logical_and(preds == tests,\n tests == self.min_label))\n fn = np.sum(np.logical_and(preds != tests,\n tests == self.min_label))\n tn = np.sum(np.logical_and(preds == tests,\n tests == self.maj_label))\n fp = np.sum(np.logical_and(preds != tests,\n tests == self.maj_label))\n\n p_o = (tp + tn)/(tp + fn + tn + fp)\n p_e = (tp + fn)*(tp + fp)/(tp + fn + tn + fp)**2 + \\\n (fp + tn)*(fn + tn)/(tp + fn + tn + fp)**2\n\n kappa = (p_o - p_e)/(1.0 - p_e)\n accuracy = (tp + tn)/(tp + fn + tn + fp)\n\n return kappa, accuracy\n\n def OSMOTE(X_min, X_maj):\n \"\"\"\n Executing OSMOTE phase\n\n Args:\n X_min (np.matrix): minority samples\n X_maj (np.matrix): majority samples\n\n Returns:\n np.matrix, np.matrix: new minority and majority datasets\n \"\"\"\n\n # initialize particles, first coordinate represents proportion\n # parameter of SMOTE\n # the second coordinate represents the number of neighbors to\n # take into consideration\n def init_pop():\n proportion = self.random_state.random_sample()/2.0+0.5\n n_neighbors = self.random_state.randint(3, 10)\n return np.array([proportion, n_neighbors])\n particles = [init_pop() for _ in range(self.n_pop)]\n # velocities initialized\n velocities = [np.array([0.1, 1]) for _ in range(self.n_pop)]\n # setting the limits of the search space\n limits = [np.array([0.25, 3]), np.array([4.0, 10])]\n # local best results\n local_best = [particles[i].copy() for i in range(self.n_pop)]\n # local best scores\n local_score = [(0.0, 0.0)]*self.n_pop\n # global best result\n global_best = particles[0].copy()\n # global best score\n global_score = (0.0, 0.0)\n # best dataset\n best_dataset = None\n\n # running the optimization\n for _ in range(self.n_iter):\n # update velocities\n for i in range(len(velocities)):\n diff1 = (local_best[i] - velocities[i])\n diff2 = (global_best - velocities[i])\n velocities[i] = (velocities[i]*self.omega +\n self.r1 * diff1 + self.r2*diff2)\n # clipping velocities using the upper bounds of the\n # particle search space\n velocities[i][0] = np.clip(\n velocities[i][0], -limits[1][0]/2, limits[1][0]/2)\n velocities[i][1] = np.clip(\n velocities[i][1], -limits[1][1]/2, limits[1][1]/2)\n\n # 
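# --- Illustrative aside (standalone sketch of the fitness computation) ---
# The fitness used above combines Cohen's kappa and accuracy computed from the
# confusion counts tp/fn/tn/fp; the helper below simply restates those formulas
# (the function name is hypothetical, not part of the library):
def kappa_accuracy_demo(tp, fn, tn, fp):
    n = tp + fn + tn + fp
    p_o = (tp + tn) / n
    p_e = (tp + fn) * (tp + fp) / n**2 + (fp + tn) * (fn + tn) / n**2
    kappa = (p_o - p_e) / (1.0 - p_e)
    accuracy = (tp + tn) / n
    return kappa, accuracy

# e.g. kappa_accuracy_demo(8, 2, 85, 5) -> (approx. 0.66, 0.93)
# --- end of aside ---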
update particles\n for i in range(len(particles)):\n particles[i] = particles[i] + velocities[i]\n # clipping the particle positions using the lower and\n # upper bounds\n particles[i][0] = np.clip(\n particles[i][0], limits[0][0], limits[1][0])\n particles[i][1] = np.clip(\n particles[i][1], limits[0][1], limits[1][1])\n\n # evaluate\n scores = []\n for i in range(len(particles)):\n # apply SMOTE\n smote = SMOTE(particles[i][0],\n int(np.rint(particles[i][1])),\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_to_sample = np.vstack([X_maj, X_min])\n y_to_sample_maj = np.repeat(\n self.maj_label, len(X_maj))\n y_to_sample_min = np.repeat(\n self.min_label, len(X_min))\n y_to_sample = np.hstack([y_to_sample_maj, y_to_sample_min])\n X_samp, y_samp = smote.sample(X_to_sample, y_to_sample)\n\n # evaluate\n scores.append(fitness(X_samp[len(X_maj):],\n X_samp[:len(X_maj)]))\n\n # update scores according to the multiobjective setting\n if (scores[i][0]*scores[i][1] >\n local_score[i][0]*local_score[i][1]):\n local_best[i] = particles[i].copy()\n local_score[i] = scores[i]\n if (scores[i][0]*scores[i][1] >\n global_score[0]*global_score[1]):\n global_best = particles[i].copy()\n global_score = scores[i]\n best_dataset = (X_samp[len(X_maj):],\n X_samp[:len(X_maj)])\n\n return best_dataset[0], best_dataset[1]\n\n def SIS(X_min, X_maj):\n \"\"\"\n SIS procedure\n\n Args:\n X_min (np.matrix): minority dataset\n X_maj (np.matrix): majority dataset\n\n Returns:\n np.matrix, np.matrix: new minority and majority datasets\n \"\"\"\n min_num = len(X_min)\n max_num = len(X_maj)\n if min_num >= max_num:\n return X_min, X_maj\n\n # initiate particles\n def init_particle():\n num = self.random_state.randint(min_num, max_num)\n maj = self.random_state.choice(np.arange(len(X_maj)), num)\n return maj\n\n particles = [init_particle() for _ in range(self.n_pop)]\n scores = [fitness(X_min, X_maj[particles[i]])\n for i in range(self.n_pop)]\n best_score = (0.0, 0.0)\n best_dataset = None\n\n for _ in range(self.n_iter):\n # mutate and evaluate\n # the way mutation or applying PSO is not described in the\n # paper in details\n for i in range(self.n_pop):\n # removing some random elements\n domain = np.arange(len(particles[i]))\n n_max = min([10, len(particles[i])])\n n_to_choose = self.random_state.randint(0, n_max)\n to_remove = self.random_state.choice(domain, n_to_choose)\n mutant = np.delete(particles[i], to_remove)\n\n # adding some random elements\n maj_set = set(np.arange(len(X_maj)))\n part_set = set(particles[i])\n diff = list(maj_set.difference(part_set))\n n_max = min([10, len(diff)])\n n_to_choose = self.random_state.randint(0, n_max)\n diff_elements = self.random_state.choice(diff, n_to_choose)\n mutant = np.hstack([mutant, np.array(diff_elements)])\n # evaluating the variant\n score = fitness(X_min, X_maj[mutant])\n if score[1] > scores[i][1]:\n particles[i] = mutant.copy()\n scores[i] = score\n if score[1] > best_score[1]:\n best_score = score\n best_dataset = mutant.copy()\n\n return X_min, X_maj[best_dataset]\n\n # executing the main optimization procedure\n current_min = X_min\n current_maj = X_maj\n for it in range(self.n_iter):\n _logger.info(self.__class__.__name__ + \": \" +\n 'staring iteration %d' % it)\n new_min, _ = OSMOTE(X_min, current_maj)\n _, new_maj = SIS(current_min, X_maj)\n\n # calculating fitness values of the four combinations\n fitness_0 = np.prod(fitness(new_min, current_maj))\n fitness_1 = np.prod(fitness(current_min, current_maj))\n fitness_2 = np.prod(fitness(new_min, 
new_maj))\n fitness_3 = np.prod(fitness(current_min, new_maj))\n\n # selecting the new current_maj and current_min datasets\n message = 'fitness scores: %f %f %f %f'\n message = message % (fitness_0, fitness_1, fitness_2, fitness_3)\n _logger.info(self.__class__.__name__ + \": \" + message)\n max_fitness = np.max([fitness_0, fitness_1, fitness_2, fitness_3])\n if fitness_1 == max_fitness or fitness_3 == max_fitness:\n current_maj = new_maj\n if fitness_0 == max_fitness or fitness_2 == max_fitness:\n current_min = new_min\n\n return (np.vstack([current_maj, current_min]),\n np.hstack([np.repeat(self.maj_label, len(current_maj)),\n np.repeat(self.min_label, len(current_min))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_pop': self.n_pop,\n 'n_iter': self.n_iter,\n 'omega': self.omega,\n 'r1': self.r1,\n 'r2': self.r2,\n 'n_jobs': self.n_jobs,\n 'classifier': self.classifier,\n 'random_state': self._random_state_init}\n\n\nclass SSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{sso,\n author=\"Rong, Tongwen\n and Gong, Huachang\n and Ng, Wing W. Y.\",\n editor=\"Wang, Xizhao\n and Pedrycz, Witold\n and Chan, Patrick\n and He, Qiang\",\n title=\"Stochastic Sensitivity Oversampling\n Technique for Imbalanced Data\",\n booktitle=\"Machine Learning and Cybernetics\",\n year=\"2014\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"161--171\",\n isbn=\"978-3-662-45652-1\"\n }\n\n Notes:\n * In the algorithm step 2d adds a constant to a vector. I have\n changed it to a componentwise adjustment, and also used the\n normalized STSM as I don't see any reason why it would be\n some reasonable, bounded value.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n h=10,\n n_iter=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n h (int): number of hidden units\n n_iter (int): optimization steps\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(h, \"h\", 1)\n self.check_greater_or_equal(n_iter, \"n_iter\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.h = h\n self.n_iter = n_iter\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5],\n 'h': [2, 5, 10, 20],\n 'n_iter': [5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # number of samples to generate in each iteration\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n samp_per_iter = max([1, int(n_to_sample/self.n_iter)])\n\n # executing the algorithm\n for _ in range(self.n_iter):\n X_min = X[y == self.min_label]\n\n # applying kmeans clustering to find the hidden neurons\n h = min([self.h, len(X_min)])\n kmeans = KMeans(n_clusters=h,\n random_state=self.random_state)\n kmeans.fit(X)\n\n # extracting the hidden center elements\n u = kmeans.cluster_centers_\n\n # extracting scale parameters as the distances of closest centers\n nn_cent = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n nn_cent.fit(u)\n dist_cent, ind_cent = nn_cent.kneighbors(u)\n v = dist_cent[:, 1]\n\n # computing the response of the hidden units\n phi = pairwise_distances(X, u)\n phi = phi**2\n phi = np.exp(-phi/v**2)\n\n # applying linear regression to find the best weights\n lr = LinearRegression()\n lr.fit(phi, y)\n f = lr.predict(phi[np.where(y == self.min_label)[0]])\n w = lr.coef_\n\n def eq_6(Q, w, u, v, x):\n \"\"\"\n Equation 6 in the paper\n \"\"\"\n tmp_sum = np.zeros(h)\n for i in range(h):\n a = (x - u[i] + Q)/np.sqrt(2*v[i])\n b = (x - u[i] - Q)/np.sqrt(2*v[i])\n tmp_prod = (sspecial.erf(a) - sspecial.erf(b))\n tmp_sum[i] = np.sqrt(np.pi/2)*v[i]*np.prod(tmp_prod)\n return np.dot(tmp_sum, w)/(2*Q)**len(x)\n\n def eq_8(Q, w, u, v, x):\n \"\"\"\n Equation 8 in the paper\n \"\"\"\n res = 0.0\n for i in range(h):\n vi2 = v[i]**2\n for r in range(h):\n vr2 = v[r]**2\n a1 = (np.sqrt(2*vi2*vr2*(vi2 + vr2)))\n\n a00_v = (vi2 + vr2)*(x + Q)\n a01_v = vi2*u[r] + vr2*u[i]\n a0_v = a00_v - a01_v\n a_v = a0_v/a1\n\n b_v = ((vi2 + vr2)*(x - Q) - (vi2*u[r] + vr2*u[i]))/a1\n tmp_prod = sspecial.erf(a_v) - 
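# --- Illustrative aside (standalone sketch of the RBF response construction) ---
# Hidden centers come from k-means, each unit's scale is its distance to the
# closest other center, and the unit responses feed a linear regression.  A
# hedged reproduction of that pipeline on synthetic data (names hypothetical):
import numpy as np
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.linear_model import LinearRegression
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X_demo = rng.random_sample((60, 3))
y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(float)

u_demo = KMeans(n_clusters=5, random_state=0).fit(X_demo).cluster_centers_

# scale of each hidden unit = distance to the nearest other center
dist_c, _ = NearestNeighbors(n_neighbors=2).fit(u_demo).kneighbors(u_demo)
v_demo = dist_c[:, 1]

phi_demo = np.exp(-pairwise_distances(X_demo, u_demo)**2 / v_demo**2)
f_demo = LinearRegression().fit(phi_demo, y_demo).predict(phi_demo)
# --- end of aside ---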
sspecial.erf(b_v)\n\n tmp_a = (np.sqrt(2*vi2*vr2*(vi2 + vr2)) /\n (vi2 + vr2))**len(x)\n norm = np.linalg.norm(u[r] - u[i])\n tmp_b = np.exp(-0.5 * norm**2/(vi2 + vr2))\n res = res + tmp_a*tmp_b*np.prod(tmp_prod)*w[i]*w[r]\n\n return (np.sqrt(np.pi)/(4*Q))**len(x)*res\n\n # applying nearest neighbors to extract Q values\n n_neighbors = min([self.n_neighbors + 1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n Q = np.mean(dist[:, n_neighbors-1])/np.sqrt(len(X[0]))\n\n # calculating the sensitivity factors\n I_1 = np.array([eq_6(Q, w, u, v, x) for x in X_min])\n I_2 = np.array([eq_8(Q, w, u, v, x) for x in X_min])\n\n stsm = f**2 - 2*f*I_1 + I_2\n\n # calculating the sampling weights\n weights = np.abs(stsm)/np.sum(np.abs(stsm))\n\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(samp_per_iter):\n idx = self.random_state.choice(\n np.arange(len(X_min)), p=weights)\n X_new = X_min[idx].copy()\n for s in range(len(X_new)):\n lam = self.random_state.random_sample(\n )*(2*(1 - weights[idx])) - (1 - weights[idx])\n X_new[s] = X_new[s] + Q*lam\n samples.append(X_new)\n\n samples = np.vstack(samples)\n X = np.vstack([X, samples])\n y = np.hstack([y, np.repeat(self.min_label, len(samples))])\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'h': self.h,\n 'n_iter': self.n_iter,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NDO_sampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ndo_sampling,\n author={Zhang, L. and Wang, W.},\n booktitle={2011 International Conference of\n Information Technology, Computer\n Engineering and Management Sciences},\n title={A Re-sampling Method for Class Imbalance\n Learning with Credit Data},\n year={2011},\n volume={1},\n number={},\n pages={393-397},\n keywords={data handling;sampling methods;\n resampling method;class imbalance\n learning;credit rating;imbalance\n problem;synthetic minority\n over-sampling technique;sample\n distribution;synthetic samples;\n credit data set;Training;\n Measurement;Support vector machines;\n Logistics;Testing;Noise;Classification\n algorithms;class imbalance;credit\n rating;SMOTE;sample distribution},\n doi={10.1109/ICM.2011.34},\n ISSN={},\n month={Sept}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n T=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n T (float): threshold parameter\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(T, \"T\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.T = T\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'T': [0.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find the neighbors of minority\n # samples among all elements\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # calculating the distances between samples in the same and different\n # classes\n d_intra = []\n d_exter = []\n for i in range(len(X_min)):\n min_mask = np.where(y[ind[i][1:]] == self.min_label)[0]\n maj_mask = np.where(y[ind[i][1:]] == self.maj_label)[0]\n if len(min_mask) > 0:\n d_intra.append(np.mean(dist[i][1:][min_mask]))\n if len(maj_mask) > 0:\n d_exter.append(np.mean(dist[i][1:][maj_mask]))\n d_intra_mean = np.mean(np.array(d_intra))\n d_exter_mean = np.mean(np.array(d_exter))\n\n # calculating the alpha value\n alpha = d_intra_mean/d_exter_mean\n\n # deciding if SMOTE is enough\n if alpha < self.T:\n smote = SMOTE(self.proportion, random_state=self.random_state)\n return smote.sample(X, y)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n random_idx = self.random_state.choice(ind[idx][1:])\n # create sample close to the initial minority point\n samples.append(X_min[idx] + (X[random_idx] - X_min[idx])\n * self.random_state.random_sample()/2.0)\n if y[random_idx] == self.min_label:\n # create another sample close to the neighboring minority point\n samples.append(X[random_idx] + (X_min[idx] - X[random_idx])\n * self.random_state.random_sample()/2.0)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return 
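# --- Illustrative aside (standalone sketch of the alpha criterion) ---
# NDO-sampling compares the mean within-class neighbor distance of minority
# points (d_intra) with their mean distance from majority neighbors (d_exter);
# when alpha = d_intra/d_exter stays below the threshold T, plain SMOTE is
# applied instead.  Hedged restatement of that check (helper name hypothetical):
import numpy as np
from sklearn.neighbors import NearestNeighbors

def alpha_demo(X, y, min_label, n_neighbors=5):
    X_min = X[y == min_label]
    nn = NearestNeighbors(n_neighbors=min(len(X), n_neighbors + 1)).fit(X)
    dist, ind = nn.kneighbors(X_min)
    d_intra, d_exter = [], []
    for i in range(len(X_min)):
        labels = y[ind[i][1:]]
        if np.any(labels == min_label):
            d_intra.append(np.mean(dist[i][1:][labels == min_label]))
        if np.any(labels != min_label):
            d_exter.append(np.mean(dist[i][1:][labels != min_label]))
    return np.mean(d_intra) / np.mean(d_exter)
# --- end of aside ---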
{'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'T': self.T,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass RBFNeuron(RandomStateMixin):\n \"\"\"\n This class abstracts a neuron of an RBF network\n \"\"\"\n\n def __init__(self,\n c,\n Ib,\n Ob,\n ranges,\n range_mins,\n init_conn_mask,\n init_conn_weights,\n random_state=None):\n \"\"\"\n Constructor of the neuron\n\n Args:\n c (np.array): center of the hidden unit\n Ib (float): upper bound on the absolute values of input weights\n Ob (float): upper bound on the absolute values of output weights\n ranges (np.array): ranges widths of parameters\n range_min (np.array): lower bounds of parameter ranges\n init_conn_mask (np.array): initial input connections\n init_conn_weights (np.array): initial weights of input connections\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n self.d = len(c)\n self.c = c\n self.Ib = Ib\n self.Ob = Ob\n self.init_conn_mask = init_conn_mask\n self.init_conn_weights = init_conn_weights\n self.ranges = ranges\n self.range_mins = range_mins\n\n self.set_random_state(random_state)\n\n self.beta = (self.random_state.random_sample()-0.5)*Ob\n self.mask = init_conn_mask\n self.input_weights = init_conn_weights\n self.r = self.random_state.random_sample()\n\n def clone(self):\n \"\"\"\n Clones the neuron\n\n Returns:\n RBFNeuron: an identical neuron\n \"\"\"\n r = RBFNeuron(self.c,\n self.Ib,\n self.Ob,\n self.ranges,\n self.range_mins,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n r.beta = self.beta\n r.mask = self.mask.copy()\n r.input_weights = self.input_weights.copy()\n r.r = self.r\n\n return r\n\n def evaluate(self, X):\n \"\"\"\n Evaluates the system on dataset X\n\n Args:\n X (np.matrix): dataset to evaluate on\n\n Returns:\n np.array: the output of the network\n \"\"\"\n wX = X[:, self.mask]*self.input_weights\n term_exp = -np.linalg.norm(wX - self.c[self.mask], axis=1)**2/self.r**2\n return self.beta*np.exp(term_exp)\n\n def mutate(self):\n \"\"\"\n Mutates the neuron\n \"\"\"\n r = self.random_state.random_sample()\n if r < 0.2:\n # centre creep\n self.c = self.random_state.normal(self.c, self.r)\n elif r < 0.4:\n # radius creep\n tmp = self.random_state.normal(self.r, np.var(self.ranges))\n if tmp > 0:\n self.r = tmp\n elif r < 0.6:\n # randomize centers\n self.c = self.random_state.random_sample(\n size=len(self.c))*self.ranges + self.range_mins\n elif r < 0.8:\n # randomize radii\n self.r = self.random_state.random_sample()*np.mean(self.ranges)\n else:\n # randomize output weight\n self.beta = self.random_state.normal(self.beta, self.Ob)\n\n def add_connection(self):\n \"\"\"\n Adds a random input connection to the neuron\n \"\"\"\n if len(self.mask) < self.d:\n d_set = set(range(self.d))\n mask_set = set(self.mask.tolist())\n domain = list(d_set.difference(mask_set))\n additional_elements = np.array(self.random_state.choice(domain))\n self.mask = np.hstack([self.mask, additional_elements])\n random_weight = (self.random_state.random_sample()-0.5)*self.Ib\n self.input_weights = np.hstack([self.input_weights, random_weight])\n\n def delete_connection(self):\n \"\"\"\n Deletes a random input connection\n \"\"\"\n if len(self.mask) > 1:\n idx = self.random_state.randint(len(self.mask))\n self.mask = np.delete(self.mask, idx)\n self.input_weights = np.delete(self.input_weights, idx)\n\n\nclass RBF(RandomStateMixin):\n \"\"\"\n RBF network abstraction\n \"\"\"\n\n def 
__init__(self,\n X,\n m_min,\n m_max,\n Ib,\n Ob,\n init_conn_mask,\n init_conn_weights,\n random_state=None):\n \"\"\"\n Initializes the RBF network\n\n Args:\n X (np.matrix): dataset to work with\n m_min (int): minimum number of hidden neurons\n m_max (int): maximum number of hidden neurons\n Ib (float): maximum absolute value of input weights\n Ob (float): maximum absolute value of output weights\n init_conn_mask (np.array): initial input connections\n init_conn_weights (np.array): initial input weights\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n self.X = X\n self.m_min = m_min\n self.m_max = m_max\n self.Ib = Ib\n self.Ob = Ob\n self.init_conn_mask = init_conn_mask\n self.init_conn_weights = init_conn_weights\n\n self.set_random_state(random_state)\n\n self.neurons = []\n self.range_mins = np.min(X, axis=0)\n self.ranges = np.max(X, axis=0) - self.range_mins\n\n # adding initial neurons\n num_neurons = self.random_state.randint(m_min, m_max)\n for _ in range(num_neurons):\n self.neurons.append(self.create_new_node())\n\n self.beta_0 = (self.random_state.random_sample()-0.5)*Ob\n\n def clone(self):\n \"\"\"\n Clones the entire network\n\n Returns:\n RBF: the cloned network\n \"\"\"\n r = RBF(self.X,\n self.m_min,\n self.m_max,\n self.Ib,\n self.Ob,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n r.neurons = [n.clone() for n in self.neurons]\n r.range_mins = self.range_mins.copy()\n r.ranges = self.ranges.copy()\n r.beta_0 = self.beta_0\n\n return r\n\n def create_new_node(self):\n \"\"\"\n Creates a new node.\n\n Returns:\n RBFNeuron: a new hidden neuron\n \"\"\"\n return RBFNeuron(self.X[self.random_state.randint(len(self.X))],\n self.Ib,\n self.Ob,\n self.ranges,\n self.range_mins,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n\n def update_data(self, X):\n \"\"\"\n Updates the data to work with\n \"\"\"\n self.X = X\n for n in self.neurons:\n n.X = X\n\n def improve_centers(self):\n \"\"\"\n Improves the center locations by kmeans clustering\n \"\"\"\n if len(np.unique(self.X, axis=0)) > len(self.neurons):\n cluster_init = np.vstack([n.c for n in self.neurons])\n kmeans = KMeans(n_clusters=len(self.neurons),\n init=cluster_init,\n n_init=1,\n max_iter=30,\n random_state=self.random_state)\n kmeans.fit(self.X)\n for i in range(len(self.neurons)):\n self.neurons[i].c = kmeans.cluster_centers_[i]\n\n def evaluate(self, X, y):\n \"\"\"\n Evaluates the target function\n\n Returns:\n float: the target function value\n \"\"\"\n evaluation = np.column_stack([n.evaluate(X) for n in self.neurons])\n f = self.beta_0 + np.sum(evaluation, axis=1)\n L_star = np.mean(abs(y[y == 1] - f[y == 1]))\n L_star += np.mean(abs(y[y == 0] - f[y == 0]))\n return L_star\n\n def mutation(self):\n \"\"\"\n Mutates the neurons\n\n Returns:\n RBF: a new, mutated RBF network\n \"\"\"\n rbf = self.clone()\n for n in rbf.neurons:\n n.mutate()\n return rbf\n\n def structural_mutation(self):\n \"\"\"\n Applies structural mutation\n\n Returns:\n RBF: a new, structurally mutated network\n \"\"\"\n # in the binary case the removal of output connections is the same as\n # removing hidden nodes\n rbf = self.clone()\n r = self.random_state.random_sample()\n if r < 0.5:\n if len(rbf.neurons) < rbf.m_max:\n rbf.neurons.append(rbf.create_new_node())\n elif len(rbf.neurons) > rbf.m_min:\n del rbf.neurons[self.random_state.randint(len(rbf.neurons))]\n else:\n rbf.neurons[self.random_state.randint(\n 
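# --- Illustrative aside (standalone sketch of the RBF fitness above) ---
# The network is scored with a class-balanced mean absolute loss: the average
# |y - f| over the positive class plus the average over the negative class.
import numpy as np

def balanced_abs_loss_demo(y, f):
    # y is a 0/1 label vector, f the real-valued network output
    return (np.mean(np.abs(y[y == 1] - f[y == 1])) +
            np.mean(np.abs(y[y == 0] - f[y == 0])))

# e.g. balanced_abs_loss_demo(np.array([0, 0, 1, 1]),
#                             np.array([0.1, 0.2, 0.7, 0.9])) == 0.35
# --- end of aside ---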
len(rbf.neurons))].delete_connection()\n rbf.neurons[self.random_state.randint(\n len(rbf.neurons))].add_connection()\n\n return rbf\n\n def recombine(self, rbf):\n \"\"\"\n Recombines two networks\n\n Args:\n rbf (RBF): another network\n\n Returns:\n RBF: the result of recombination\n \"\"\"\n # the order of neurons doesn't matter, so the logic can be simplified\n new = self.clone()\n if self.random_state.random_sample() < 0.5:\n n_random = self.random_state.randint(1, len(new.neurons))\n new_neurons_0 = self.random_state.choice(new.neurons, n_random)\n n_random = self.random_state.randint(1, len(rbf.neurons))\n new_neurons_1 = self.random_state.choice(rbf.neurons, n_random)\n new.neurons = [n.clone() for n in new_neurons_0]\n new.neurons.extend([n.clone() for n in new_neurons_1])\n while len(new.neurons) > self.m_max:\n del new.neurons[self.random_state.randint(len(new.neurons))]\n else:\n for i in range(len(new.neurons)):\n if self.random_state.random_sample() < 0.2:\n n_random = self.random_state.randint(len(rbf.neurons))\n new.neurons[i] = rbf.neurons[n_random].clone()\n return new\n\n\nclass DSRBF(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{dsrbf,\n title = \"A dynamic over-sampling procedure based on\n sensitivity for multi-class problems\",\n journal = \"Pattern Recognition\",\n volume = \"44\",\n number = \"8\",\n pages = \"1821 - 1833\",\n year = \"2011\",\n issn = \"0031-3203\",\n doi = \"https://doi.org/10.1016/j.patcog.2011.02.019\",\n author = \"Francisco Fernández-Navarro and César\n Hervás-Martínez and Pedro Antonio\n Gutiérrez\",\n keywords = \"Classification, Multi-class, Sensitivity,\n Accuracy, Memetic algorithm, Imbalanced\n datasets, Over-sampling method, SMOTE\"\n }\n\n Notes:\n * It is not entirely clear why J-1 output is supposed where J is the\n number of classes.\n * The fitness function is changed to a balanced mean loss, as I found\n that it just ignores classification on minority samples\n (class label +1) in the binary case.\n * The iRprop+ optimization is not implemented.\n * The original paper proposes using SMOTE incrementally. Instead of\n that, this implementation applies SMOTE to generate all samples\n needed in the sampling epochs and the evolution of RBF networks\n is used to select the sampling providing the best results.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_memetic]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n m_min=4,\n m_max=10,\n Ib=2,\n Ob=2,\n n_pop=500,\n n_init_pop=5000,\n n_iter=40,\n n_sampling_epoch=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n m_min (int): minimum number of hidden units\n m_max (int): maximum number of hidden units\n Ib (float): input weight range\n Ob (float): output weight range\n n_pop (int): size of population\n n_init_pop (int): size of initial population\n n_iter (int): number of iterations\n n_sampling_epoch (int): resampling after this many iterations\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(m_min, \"m_min\", 1)\n self.check_greater_or_equal(m_max, \"m_max\", 1)\n self.check_greater(Ib, \"Ib\", 0)\n self.check_greater(Ob, \"Ob\", 0)\n self.check_greater_or_equal(n_pop, \"n_pop\", 2)\n self.check_greater_or_equal(n_init_pop, \"n_pop\", 2)\n self.check_greater_or_equal(n_iter, \"n_iter\", 0)\n self.check_greater_or_equal(n_sampling_epoch, \"n_sampling_epoch\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.m_min = m_min\n self.m_max = m_max\n self.Ib = Ib\n self.Ob = Ob\n self.n_pop = n_pop\n self.n_init_pop = n_init_pop\n self.n_iter = n_iter\n self.n_sampling_epoch = n_sampling_epoch\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the technique optimizes, it is unnecessary to check various\n # combinations except one specifying a decent workspace with a large\n # number of iterations\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'm_min': [4],\n 'm_max': [10],\n 'Ib': [2.0],\n 'Ob': [2.0],\n 'n_pop': [100],\n 'n_init_pop': [1000],\n 'n_iter': [40],\n 'n_sampling_epoch': [8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Standardizing the data to let the network work with comparable\n # attributes\n ss = StandardScaler()\n X = ss.fit_transform(X)\n X_orig = X\n y_orig = y\n\n X, y = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # generate initial connections and weights randomly\n domain = np.arange(len(X[0]))\n n_random = int(len(X[0])/2)\n init_conn_mask = self.random_state.choice(domain, n_random)\n init_conn_weights = self.random_state.random_sample(size=n_random)\n\n # setting epoch lengths\n epoch_len = int(self.n_iter/self.n_sampling_epoch)\n\n if len(X_orig) < self.m_min + 1:\n return X_orig.copy(), y_orig.copy()\n m_max = min(len(X_orig), self.m_max)\n\n # generating initial population\n def init_pop():\n return RBF(X,\n self.m_min,\n m_max,\n 
self.Ib,\n self.Ob,\n init_conn_mask,\n init_conn_weights,\n random_state=self.random_state)\n\n population = [init_pop() for _ in range(self.n_init_pop)]\n population = [[p, X, y, np.inf] for p in population]\n population = sorted([[p[0], p[1], p[2], p[0].evaluate(p[1], p[2])]\n for p in population], key=lambda x: x[3])\n population = population[:self.n_pop]\n\n # executing center improval in the hidden units\n for p in population:\n p[0].improve_centers()\n\n # executing the optimization process\n for iteration in range(self.n_iter):\n message = \"Iteration %d/%d, loss: %f, data size %d\"\n message = message % (iteration, self.n_iter, population[0][3],\n len(population[0][1]))\n _logger.info(self.__class__.__name__ + \": \" + message)\n # evaluating non-evaluated elements\n for p in population:\n if p[3] == np.inf:\n p[3] = p[0].evaluate(p[1], p[2])\n\n # sorting the population by the loss values\n population = sorted([p for p in population], key=lambda x: x[3])\n population = population[:self.n_pop]\n\n # determining the number of elements to be changed\n p_best = population[0]\n p_parametric_mut = population[:int(0.1*self.n_pop)]\n p_structural_mut = population[:int(0.9*self.n_pop-1)]\n p_recombination = population[:int(0.1*self.n_pop)]\n\n # executing mutation\n for p in p_parametric_mut:\n population.append([p[0].mutation(), p[1], p[2], np.inf])\n\n # executing structural mutation\n for p in p_structural_mut:\n population.append(\n [p[0].structural_mutation(), p[1], p[2], np.inf])\n\n # executing recombination\n for p in p_recombination:\n domain = range(len(p_recombination))\n p_rec_idx = self.random_state.choice(domain)\n p_rec = p_recombination[p_rec_idx][0]\n population.append([p[0].recombine(p_rec), p[1], p[2], np.inf])\n\n # do the sampling\n if iteration % epoch_len == 0:\n smote = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X, y = smote.sample(X_orig, y_orig)\n for i in range(self.n_pop):\n tmp = [population[i][0].clone(), X, y, np.inf]\n tmp[0].update_data(X)\n tmp[0].improve_centers()\n population.append(tmp)\n\n # evaluate unevaluated elements of the population\n for p in population:\n if p[3] == np.inf:\n p[3] = p[0].evaluate(p[1], p[2])\n\n # sorting the population\n population = sorted([p for p in population],\n key=lambda x: x[3])[:self.n_pop]\n\n return ss.inverse_transform(p_best[1]), p_best[2]\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'm_min': self.m_min,\n 'm_max': self.m_max,\n 'Ib': self.Ib,\n 'Ob': self.Ob,\n 'n_pop': self.n_pop,\n 'n_init_pop': self.n_init_pop,\n 'n_iter': self.n_iter,\n 'n_sampling_epoch': self.n_sampling_epoch,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Gaussian_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{gaussian_smote,\n title={Gaussian-Based SMOTE Algorithm for Solving Skewed\n Class Distributions},\n author={Hansoo Lee and Jonggeun Kim and Sungshin Kim},\n journal={Int. J. 
Fuzzy Logic and Intelligent Systems},\n year={2017},\n volume={17},\n pages={229-234}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n sigma=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n sigma (float): variance\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(sigma, \"sigma\", 0.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.sigma = sigma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'sigma': [0.5, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardization applied to make sigma compatible with the data\n ss = StandardScaler()\n X_ss = ss.fit_transform(X)\n\n # fitting nearest neighbors model to find the minority neighbors of\n # minority samples\n X_min = X_ss[y == self.min_label]\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n random_neighbor = self.random_state.choice(ind[idx][1:])\n s0 = self.sample_between_points(X_min[idx], X_min[random_neighbor])\n samples.append(self.random_state.normal(s0, self.sigma))\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'sigma': self.sigma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass kmeans_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{kmeans_smote,\n title = \"Improving imbalanced learning through a\n 
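# --- Illustrative aside (standalone sketch of the Gaussian-SMOTE step) ---
# In standardized space a base minority point is interpolated with one of its
# minority neighbors and Gaussian noise with standard deviation sigma is added;
# the result is mapped back through the inverse of the scaler.  Hedged
# reconstruction on synthetic data (variable names are hypothetical):
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(3)
X_min_demo = rng.normal(size=(20, 2))
sigma_demo = 0.5

ss_demo = StandardScaler()
X_ss_demo = ss_demo.fit_transform(X_min_demo)
_, ind_demo = NearestNeighbors(n_neighbors=6).fit(X_ss_demo).kneighbors(X_ss_demo)

idx = rng.randint(len(X_ss_demo))
neigh = rng.choice(ind_demo[idx][1:])
lam = rng.random_sample()
s0 = X_ss_demo[idx] + lam * (X_ss_demo[neigh] - X_ss_demo[idx])
new_sample = ss_demo.inverse_transform(rng.normal(s0, sigma_demo).reshape(1, -1))
# --- end of aside ---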
heuristic oversampling method based\n on k-means and SMOTE\",\n journal = \"Information Sciences\",\n volume = \"465\",\n pages = \"1 - 20\",\n year = \"2018\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2018.06.056\",\n author = \"Georgios Douzas and Fernando Bacao and\n Felix Last\",\n keywords = \"Class-imbalanced learning, Oversampling,\n Classification, Clustering, Supervised\n learning, Within-class imbalance\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clusters=10,\n irt=2.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors\n n_clusters (int): number of clusters\n irt (float): imbalanced ratio threshold\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 1)\n self.check_greater_or_equal(irt, \"irt\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.irt = irt\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [2, 5, 10, 20, 50],\n 'irt': [0.5, 0.8, 1.0, 1.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # applying kmeans clustering to all data\n n_clusters = min([self.n_clusters, len(X)])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n\n # extracting clusters\n labels = kmeans.labels_\n clusters = [np.where(labels == li)[0] for li in range(n_clusters)]\n\n # cluster filtering\n def cluster_filter(c):\n numerator = np.sum(y[c] == self.maj_label) + 1\n denominator = np.sum(y[c] == self.min_label) + 1\n n_minority = np.sum(y[c] == self.min_label)\n return numerator/denominator < self.irt and n_minority > 1\n\n filt_clusters = [c for c in clusters if cluster_filter(c)]\n\n if len(filt_clusters) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"number of clusters after filtering is 0\")\n return X.copy(), y.copy()\n\n # Step 2 in 
the paper\n sparsity = []\n nearest_neighbors = []\n cluster_minority_ind = []\n for c in filt_clusters:\n # extract minority indices in the cluster\n minority_ind = c[y[c] == self.min_label]\n cluster_minority_ind.append(minority_ind)\n # compute distance matrix of minority samples in the cluster\n dm = pairwise_distances(X[minority_ind])\n min_count = len(minority_ind)\n # compute the average of distances\n avg_min_dist = (np.sum(dm) - dm.trace()) / \\\n (len(minority_ind)**2 - len(minority_ind))\n # compute sparsity (Step 4)\n sparsity.append(avg_min_dist**len(X[0])/min_count)\n # extract the nearest neighbors graph\n n_neighbors = min([len(minority_ind), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X[minority_ind])\n nearest_neighbors.append(nn.kneighbors(X[minority_ind]))\n\n # Step 5 - compute density of sampling\n weights = sparsity/np.sum(sparsity)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random cluster index and random minority element\n clust_ind = self.random_state.choice(\n np.arange(len(weights)), p=weights)\n idx = self.random_state.randint(\n len(cluster_minority_ind[clust_ind]))\n base_idx = cluster_minority_ind[clust_ind][idx]\n # choose random neighbor\n neighbor_cluster_indices = nearest_neighbors[clust_ind][1][idx][1:]\n domain = cluster_minority_ind[clust_ind][neighbor_cluster_indices]\n neighbor_idx = self.random_state.choice(domain)\n # sample\n X_a = X[base_idx]\n X_b = X[neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'irt': self.irt,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Supervised_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{supervised_smote,\n author = {Hu, Jun AND He, Xue AND Yu, Dong-Jun AND\n Yang, Xi-Bei AND Yang, Jing-Yu AND Shen,\n Hong-Bin},\n journal = {PLOS ONE},\n publisher = {Public Library of Science},\n title = {A New Supervised Over-Sampling Algorithm\n with Application to Protein-Nucleotide\n Binding Residue Prediction},\n year = {2014},\n month = {09},\n volume = {9},\n url = {https://doi.org/10.1371/journal.pone.0107676},\n pages = {1-10},\n number = {9},\n doi = {10.1371/journal.pone.0107676}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n th_lower=0.5,\n th_upper=1.0,\n classifier=RandomForestClassifier(n_estimators=50,\n n_jobs=1,\n random_state=5),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
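# --- Illustrative aside (standalone sketch of the cluster weighting) ---
# For each retained cluster the mean pairwise distance of its minority points
# is raised to the number of features and divided by the minority count; the
# normalized values become the sampling weights, so sparser clusters receive
# more synthetic points.  Hedged restatement (clusters are assumed to contain
# at least two minority points, as guaranteed by the filter above):
import numpy as np
from sklearn.metrics import pairwise_distances

def cluster_sparsity_demo(X_min_clusters, n_features):
    # X_min_clusters: list of arrays of minority points, one per filtered cluster
    sparsity = []
    for X_c in X_min_clusters:
        dm = pairwise_distances(X_c)
        m = len(X_c)
        avg = (np.sum(dm) - dm.trace()) / (m**2 - m)
        sparsity.append(avg**n_features / m)
    sparsity = np.array(sparsity)
    return sparsity / np.sum(sparsity)
# --- end of aside ---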
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n th_lower (float): lower bound of the confidence interval\n th_upper (float): upper bound of the confidence interval\n classifier (obj): classifier used to estimate class memberships\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_in_range(th_lower, \"th_lower\", [0, 1])\n self.check_in_range(th_upper, \"th_upper\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.th_lower = th_lower\n self.th_upper = th_upper\n self.classifier = classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n classifiers = [RandomForestClassifier(n_estimators=50,\n n_jobs=1,\n random_state=5)]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'th_lower': [0.3, 0.5, 0.8],\n 'th_upper': [1.0],\n 'classifier': classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # training the classifier\n self.classifier.fit(X, y)\n\n X_min = X[y == self.min_label]\n\n th_lower = self.th_lower\n\n # do the sampling\n samples = []\n n_trials = 1\n n_success = 1\n while len(samples) < n_to_sample:\n n_trials = n_trials + 1\n\n domain = range(len(X_min))\n x0, x1 = self.random_state.choice(domain, 2, replace=False)\n x0, x1 = X_min[x0], X_min[x1]\n sample = self.sample_between_points(x0, x1)\n probs = self.classifier.predict_proba(sample.reshape(1, -1))\n # extract probability\n class_column = np.where(self.classifier.classes_ == self.min_label)\n class_column = class_column[0][0]\n prob = probs[0][class_column]\n if prob >= th_lower and prob <= self.th_upper:\n samples.append(sample)\n n_success = n_success + 1\n\n # decreasing lower threshold if needed\n if n_success/n_trials < 0.02:\n th_lower = th_lower * 0.9\n n_success = 1\n n_trials = 1\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'th_lower': self.th_lower,\n 'th_upper': self.th_upper,\n 'classifier': self.classifier,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SN_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{sn_smote,\n author=\"Garc{\\'i}a, V.\n and 
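# --- Illustrative aside (standalone sketch of the acceptance test) ---
# A candidate generated between two minority points is kept only if the
# classifier's estimated minority probability lies in [th_lower, th_upper];
# when too few candidates pass, th_lower is relaxed by a factor of 0.9 as in
# the loop above.  Hedged restatement of the test (names are hypothetical):
import numpy as np

def accept_candidate_demo(classifier, candidate, min_label, th_lower, th_upper):
    probs = classifier.predict_proba(candidate.reshape(1, -1))
    col = np.where(classifier.classes_ == min_label)[0][0]
    prob = probs[0][col]
    return th_lower <= prob <= th_upper
# --- end of aside ---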
S{\\'a}nchez, J. S.\n and Mart{\\'i}n-F{\\'e}lez, R.\n and Mollineda, R. A.\",\n title=\"Surrounding neighborhood-based SMOTE for\n learning from imbalanced data sets\",\n journal=\"Progress in Artificial Intelligence\",\n year=\"2012\",\n month=\"Dec\",\n day=\"01\",\n volume=\"1\",\n number=\"4\",\n pages=\"347--362\",\n issn=\"2192-6360\",\n doi=\"10.1007/s13748-012-0027-5\",\n url=\"https://doi.org/10.1007/s13748-012-0027-5\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (float): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # the search for the k nearest centroid neighbors is limited for the\n # nearest 10*n_neighbors neighbors\n n_neighbors = min([self.n_neighbors*10, len(X_min)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # determining k nearest centroid neighbors\n ncn = np.zeros(shape=(len(X_min), self.n_neighbors)).astype(int)\n ncn_nums = np.zeros(len(X_min)).astype(int)\n\n # extracting nearest centroid neighbors\n for i in range(len(X_min)):\n # the first NCN neighbor is the first neighbor\n ncn[i, 0] = ind[i][1]\n\n # iterating through all neighbors and finding the one with smaller\n # centroid distance to X_min[i] than the previous set of neighbors\n n_cent = 1\n centroid = X_min[ncn[i, 0]]\n cent_dist = np.linalg.norm(centroid - X_min[i])\n j = 2\n while j < len(ind[i]) and n_cent < self.n_neighbors:\n new_cent_dist = np.linalg.norm(\n (centroid + X_min[ind[i][j]])/(n_cent + 1) - X_min[i])\n\n # checking if new nearest 
centroid neighbor found\n if new_cent_dist < cent_dist:\n centroid = centroid + X_min[ind[i][j]]\n ncn[i, n_cent] = ind[i][j]\n n_cent = n_cent + 1\n cent_dist = new_cent_dist\n j = j + 1\n\n # registering the number of nearest centroid neighbors found\n ncn_nums[i] = n_cent\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n random_idx = self.random_state.randint(len(X_min))\n random_neighbor_idx = self.random_state.choice(\n ncn[random_idx][:ncn_nums[random_idx]])\n samples.append(self.sample_between_points(\n X_min[random_idx], X_min[random_neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CCR(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ccr,\n author = {Koziarski, Michał and Wozniak, Michal},\n year = {2017},\n month = {12},\n pages = {727–736},\n title = {CCR: A combined cleaning and resampling algorithm\n for imbalanced data classification},\n volume = {27},\n journal = {International Journal of Applied Mathematics\n and Computer Science}\n }\n\n Notes:\n * Adapted from https://github.com/michalkoziarski/CCR\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n energy=1.0,\n scaling=0.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n energy (float): energy parameter\n scaling (float): scaling factor\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(energy, \"energy\", 0)\n self.check_greater_or_equal(scaling, \"scaling\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.energy = energy\n self.scaling = scaling\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'energy': [0.001, 0.0025, 0.005,\n 0.01, 0.025, 0.05, 0.1,\n 0.25, 0.5, 1.0, 2.5, 5.0,\n 10.0, 25.0, 50.0, 100.0],\n 'scaling': [0.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return 
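# --- Illustrative sketch (not part of the library): sample_between_points,
# used by SN_SMOTE and most other classes here, is assumed to be the usual
# SMOTE-style linear interpolation between two feature vectors.
import numpy as np


def sample_between_points(x, y, random_state=None):
    rs = np.random.RandomState(random_state)
    return x + rs.random_sample() * (y - x)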
X.copy(), y.copy()\n\n def taxicab_sample(n, r):\n sample = []\n random_numbers = self.random_state.rand(n)\n\n for i in range(n):\n # spread = r - np.sum(np.abs(sample))\n spread = r\n if len(sample) > 0:\n spread -= abs(sample[-1])\n sample.append(spread * (2 * random_numbers[i] - 1))\n\n return self.random_state.permutation(sample)\n\n minority = X[y == self.min_label]\n majority = X[y == self.maj_label]\n\n energy = self.energy * (X.shape[1] ** self.scaling)\n\n distances = pairwise_distances(minority, majority, metric='l1')\n\n radii = np.zeros(len(minority))\n translations = np.zeros(majority.shape)\n\n for i in range(len(minority)):\n minority_point = minority[i]\n remaining_energy = energy\n r = 0.0\n sorted_distances = np.argsort(distances[i])\n current_majority = 0\n\n while True:\n if current_majority > len(majority):\n break\n\n if current_majority == len(majority):\n if current_majority == 0:\n radius_change = remaining_energy / \\\n (current_majority + 1.0)\n else:\n radius_change = remaining_energy / current_majority\n\n r += radius_change\n break\n\n radius_change = remaining_energy / (current_majority + 1.0)\n\n dist = distances[i, sorted_distances[current_majority]]\n if dist >= r + radius_change:\n r += radius_change\n break\n else:\n if current_majority == 0:\n last_distance = 0.0\n else:\n cm1 = current_majority - 1\n last_distance = distances[i, sorted_distances[cm1]]\n\n curr_maj_idx = sorted_distances[current_majority]\n radius_change = distances[i, curr_maj_idx] - last_distance\n r += radius_change\n decrease = radius_change * (current_majority + 1.0)\n remaining_energy -= decrease\n current_majority += 1\n\n radii[i] = r\n\n for j in range(current_majority):\n majority_point = majority[sorted_distances[j]].astype(float)\n d = distances[i, sorted_distances[j]]\n\n if d < 1e-20:\n n_maj_point = len(majority_point)\n r_num = self.random_state.rand(n_maj_point)\n r_num = 1e-6 * r_num + 1e-6\n r_sign = self.random_state.choice([-1.0, 1.0], n_maj_point)\n majority_point += r_num * r_sign\n d = np.sum(np.abs(minority_point - majority_point))\n\n translation = (r - d) / d * (majority_point - minority_point)\n translations[sorted_distances[j]] += translation\n\n majority = majority.astype(float)\n majority += translations\n\n appended = []\n for i in range(len(minority)):\n minority_point = minority[i]\n synthetic_samples = n_to_sample / (radii[i] * np.sum(1.0 / radii))\n synthetic_samples = int(np.round(synthetic_samples))\n r = radii[i]\n\n for _ in range(synthetic_samples):\n appended.append(minority_point +\n taxicab_sample(len(minority_point), r))\n\n if len(appended) == 0:\n _logger.info(\"No samples were added\")\n return X.copy(), y.copy()\n\n return (np.vstack([X, np.vstack(appended)]),\n np.hstack([y, np.repeat(self.min_label, len(appended))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'energy': self.energy,\n 'scaling': self.scaling,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ANS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ans,\n author = {Siriseriwan, W and Sinapiromsaran, Krung},\n year = {2017},\n month = {09},\n pages = {565-576},\n title = {Adaptive neighbor synthetic minority oversampling\n technique under 1NN outcast handling},\n volume = {39},\n booktitle = {Songklanakarin Journal of Science and\n Technology}\n }\n\n Notes:\n * The method is not prepared for the case when there 
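# --- Illustrative sketch (not part of the library): the taxicab (L1-ball)
# perturbation used by CCR above, as a standalone function. Each coordinate
# spends part of the radius budget left by the previous one, and the result
# is permuted so that no dimension is systematically favoured.
import numpy as np


def taxicab_offset(n_dims, r, random_state=None):
    rs = np.random.RandomState(random_state)
    offset, spread = [], r
    for u in rs.rand(n_dims):
        offset.append(spread * (2.0 * u - 1.0))
        spread = r - abs(offset[-1])     # budget for the next coordinate
    return rs.permutation(offset)


# a synthetic CCR point is then minority_point + taxicab_offset(n_features, radii[i])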
is no c satisfying\n the condition in line 25 of the algorithm, fixed.\n * The method is not prepared for empty Pused sets, fixed.\n \"\"\"\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_density_based]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [\n 0.1, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # outcast extraction algorithm\n\n # maximum C value\n C_max = int(0.25*len(X))\n\n # finding the first minority neighbor of minority samples\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the distances of first minority neighbors from minority\n # samples\n first_pos_neighbor_distances = dist[:, 1]\n\n # fitting another nearest neighbors model to extract majority\n # samples in the neighborhoods of minority samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X)\n\n # extracting the number of majority samples in the neighborhood of\n # minority samples\n out_border = []\n for i in range(len(X_min)):\n x = X_min[i].reshape(1, -1)\n ind = nn.radius_neighbors(x,\n first_pos_neighbor_distances[i],\n return_distance=False)\n out_border.append(np.sum(y[ind[0]] == self.maj_label))\n\n out_border = np.array(out_border)\n\n # finding the optimal C value by comparing the number of outcast\n # minority samples when traversing the range [1, C_max]\n n_oc_m1 = -1\n C = 0\n best_diff = np.inf\n for c in range(1, C_max):\n n_oc = np.sum(out_border >= c)\n if abs(n_oc - n_oc_m1) < best_diff:\n best_diff = abs(n_oc - n_oc_m1)\n C = n_oc\n n_oc_m1 = n_oc\n\n # determining the set of minority samples Pused\n Pused = np.where(out_border < C)[0]\n\n # Adaptive neighbor SMOTE algorithm\n\n # checking if there are minority samples left\n if len(Pused) == 0:\n 
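# --- Illustrative sketch (not part of the library): the out_border counts
# used by the ANS outcast-handling step above -- for each minority point,
# the number of majority samples falling inside the radius given by its
# closest minority neighbour.
import numpy as np
from sklearn.neighbors import NearestNeighbors


def majority_counts_in_first_minority_radius(X, y, min_label=1, maj_label=0):
    X_min = X[y == min_label]
    dist, _ = NearestNeighbors(n_neighbors=2).fit(X_min).kneighbors(X_min)
    radii = dist[:, 1]              # distance to the closest other minority point

    nn_all = NearestNeighbors(n_neighbors=1).fit(X)
    counts = []
    for x, r in zip(X_min, radii):
        ind = nn_all.radius_neighbors(x.reshape(1, -1), r, return_distance=False)
        counts.append(int(np.sum(y[ind[0]] == maj_label)))
    return np.array(counts)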
_logger.info(self.__class__.__name__ + \": \" + \"Pused is empty\")\n return X.copy(), y.copy()\n\n # finding the maximum distances of first positive neighbors\n eps = np.max(first_pos_neighbor_distances[Pused])\n\n # fitting nearest neighbors model to find nearest minority samples in\n # the neighborhoods of minority samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_min[Pused])\n ind = nn.radius_neighbors(X_min[Pused], eps, return_distance=False)\n\n # extracting the number of positive samples in the neighborhoods\n Np = np.array([len(i) for i in ind])\n\n if np.all(Np == 1):\n message = \"all samples have only 1 neighbor in the given radius\"\n _logger.warning(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n # determining the distribution used to generate samples\n distribution = Np/np.sum(Np)\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n random_idx = self.random_state.choice(\n np.arange(len(Pused)), p=distribution)\n if len(ind[random_idx]) > 1:\n random_neig_idx = self.random_state.choice(ind[random_idx])\n while random_neig_idx == random_idx:\n random_neig_idx = self.random_state.choice(ind[random_idx])\n X_a = X_min[Pused[random_idx]]\n X_b = X_min[Pused[random_neig_idx]]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass cluster_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{cluster_SMOTE,\n author={Cieslak, D. A. and Chawla, N. V. and\n Striegel, A.},\n booktitle={2006 IEEE International Conference\n on Granular Computing},\n title={Combating imbalance in network\n intrusion datasets},\n year={2006},\n volume={},\n number={},\n pages={732-737},\n keywords={Intelligent networks;Intrusion detection;\n Telecommunication traffic;Data mining;\n Computer networks;Data security;\n Machine learning;Counting circuits;\n Computer security;Humans},\n doi={10.1109/GRC.2006.1635905},\n ISSN={},\n month={May}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=3,\n n_clusters=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in SMOTE\n n_clusters (int): number of clusters\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_components\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [3, 5, 7, 9]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n n_clusters = min([len(X_min), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n cluster_labels = kmeans.labels_\n unique_labels = np.unique(cluster_labels)\n\n # creating nearest neighbors objects for each cluster\n cluster_indices = [np.where(cluster_labels == c)[0]\n for c in unique_labels]\n\n def nneighbors(idx):\n n_neighbors = min([self.n_neighbors, len(cluster_indices[idx])])\n nn = NearestNeighbors(n_neighbors=n_neighbors)\n return nn.fit(X_min[cluster_indices[idx]])\n\n cluster_nns = [nneighbors(idx) for idx in range(len(cluster_indices))]\n\n if max([len(c) for c in cluster_indices]) <= 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"All clusters contain 1 element\")\n return X.copy(), y.copy()\n\n # generating the samples\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.randint(len(cluster_indices))\n if len(cluster_indices[cluster_idx]) <= 1:\n continue\n random_idx = self.random_state.randint(\n len(cluster_indices[cluster_idx]))\n sample_a = X_min[cluster_indices[cluster_idx]][random_idx]\n dist, indices = cluster_nns[cluster_idx].kneighbors(\n sample_a.reshape(1, -1))\n sample_b_idx = self.random_state.choice(\n cluster_indices[cluster_idx][indices[0][1:]])\n sample_b = X_min[sample_b_idx]\n samples.append(self.sample_between_points(sample_a, sample_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return 
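# --- Illustrative sketch (not part of the library): the cluster-then-SMOTE
# idea above in a few lines. Minority points are clustered with KMeans and
# synthetic points are interpolated only between members of the same cluster;
# unlike the class above, this simplified version pairs any two members of a
# cluster instead of restricting the choice to the k nearest neighbours.
import numpy as np
from sklearn.cluster import KMeans


def cluster_smote_sketch(X_min, n_to_sample, n_clusters=3, random_state=42):
    rs = np.random.RandomState(random_state)
    labels = KMeans(n_clusters=min(n_clusters, len(X_min)),
                    random_state=random_state).fit_predict(X_min)
    clusters = [np.where(labels == c)[0] for c in np.unique(labels)]
    clusters = [c for c in clusters if len(c) > 1]    # need at least two members

    samples = []
    while len(samples) < n_to_sample and clusters:
        members = clusters[rs.randint(len(clusters))]
        a, b = rs.choice(members, 2, replace=False)
        samples.append(X_min[a] + rs.random_sample() * (X_min[b] - X_min[a]))
    return np.vstack(samples) if samples else X_min[:0].copy()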
{'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MulticlassOversampling(StatisticsMixin):\n \"\"\"\n Carries out multiclass oversampling\n\n Example::\n\n import smote_variants as sv\n import sklearn.datasets as datasets\n\n dataset= datasets.load_wine()\n\n oversampler= sv.MulticlassOversampling(sv.distance_SMOTE())\n\n X_samp, y_samp= oversampler.sample(dataset['data'], dataset['target'])\n \"\"\"\n\n def __init__(self,\n oversampler=SMOTE(random_state=2),\n strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Constructor of the multiclass oversampling object\n\n Args:\n oversampler (obj): an oversampling object\n strategy (str/obj): a multiclass oversampling strategy, currently\n 'eq_1_vs_many_successive' or\n 'equalize_1_vs_many'\n \"\"\"\n self.oversampler = oversampler\n self.strategy = strategy\n\n def sample_equalize_1_vs_many(self, X, y):\n \"\"\"\n Does the sample generation by oversampling each minority class to the\n cardinality of the majority class using all original samples in each\n run.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n message = \"Running multiclass oversampling with strategy %s\"\n message = message % str(self.strategy)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if 'proportion' not in self.oversampler.get_params():\n message = (\"Multiclass oversampling strategy %s cannot be \"\n \"used with oversampling techniques without proportion\"\n \" parameter\")\n message = message % str(self.strategy)\n raise ValueError(message)\n\n # extract class label statistics\n self.class_label_statistics(X, y)\n\n # sort labels by number of samples\n class_labels = self.class_stats.keys()\n class_labels = sorted(class_labels, key=lambda x: -self.class_stats[x])\n\n majority_class_label = class_labels[0]\n\n # determining the majority class data\n X_maj = X[y == majority_class_label]\n\n # dict to store the results\n results = {}\n results[majority_class_label] = X_maj.copy()\n\n # running oversampling for all minority classes against all oversampled\n # classes\n for i in range(1, len(class_labels)):\n message = \"Sampling minority class with label: %d\"\n message = message % class_labels[i]\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # extract current minority class\n minority_class_label = class_labels[i]\n X_min = X[y == minority_class_label]\n X_maj = X[y != minority_class_label]\n\n # prepare data to pass to oversampling\n X_training = np.vstack([X_maj, X_min])\n y_training = np.hstack(\n [np.repeat(0, len(X_maj)), np.repeat(1, len(X_min))])\n\n # prepare parameters by properly setting the proportion value\n params = self.oversampler.get_params()\n\n num_to_generate = self.class_stats[majority_class_label] - \\\n self.class_stats[class_labels[i]]\n num_to_gen_to_all = len(X_maj) - self.class_stats[class_labels[i]]\n\n params['proportion'] = num_to_generate/num_to_gen_to_all\n\n # instantiating new oversampling object with the proper proportion\n # parameter\n oversampler = self.oversampler.__class__(**params)\n\n # executing the sampling\n X_samp, y_samp = oversampler.sample(X_training, y_training)\n\n # registaring the newly oversampled minority class in the output\n # set\n results[class_labels[i]] = X_samp[len(\n X_training):][y_samp[len(X_training):] == 1]\n\n # constructing the output 
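# --- Illustrative sketch (not part of the library): how the 'proportion'
# passed to the binary oversampler is derived above, so that running it on
# (all other classes) vs (class i) tops class i up to exactly the majority
# cardinality. The class sizes below are hypothetical.
n_majority, n_mid, n_min = 100, 60, 20

# oversampling the smallest class against everything else:
num_to_generate = n_majority - n_min                  # 80 new samples are wanted
num_to_gen_to_all = (n_majority + n_mid) - n_min      # proportion=1.0 would add 140
proportion = num_to_generate / num_to_gen_to_all      # ~0.571 handed to the sampler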
set\n X_final = results[class_labels[1]]\n y_final = np.repeat(class_labels[1], len(results[class_labels[1]]))\n\n for i in range(2, len(class_labels)):\n X_final = np.vstack([X_final, results[class_labels[i]]])\n y_new = np.repeat(class_labels[i], len(results[class_labels[i]]))\n y_final = np.hstack([y_final, y_new])\n\n return np.vstack([X, X_final]), np.hstack([y, y_final])\n\n def sample_equalize_1_vs_many_successive(self, X, y):\n \"\"\"\n Does the sample generation by oversampling each minority class\n successively to the cardinality of the majority class,\n incorporating the results of previous oversamplings.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n message = \"Running multiclass oversampling with strategy %s\"\n message = message % str(self.strategy)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if 'proportion' not in self.oversampler.get_params():\n message = (\"Multiclass oversampling strategy %s cannot be used\"\n \" with oversampling techniques without proportion\"\n \" parameter\") % str(self.strategy)\n raise ValueError(message)\n\n # extract class label statistics\n self.class_label_statistics(X, y)\n\n # sort labels by number of samples\n class_labels = self.class_stats.keys()\n class_labels = sorted(class_labels, key=lambda x: -self.class_stats[x])\n\n majority_class_label = class_labels[0]\n\n # determining the majority class data\n X_maj = X[y == majority_class_label]\n\n # dict to store the results\n results = {}\n results[majority_class_label] = X_maj.copy()\n\n # running oversampling for all minority classes against all\n # oversampled classes\n for i in range(1, len(class_labels)):\n message = \"Sampling minority class with label: %d\"\n message = message % class_labels[i]\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # extract current minority class\n minority_class_label = class_labels[i]\n X_min = X[y == minority_class_label]\n\n # prepare data to pass to oversampling\n X_training = np.vstack([X_maj, X_min])\n y_training = np.hstack(\n [np.repeat(0, len(X_maj)), np.repeat(1, len(X_min))])\n\n # prepare parameters by properly setting the proportion value\n params = self.oversampler.get_params()\n\n n_majority = self.class_stats[majority_class_label]\n n_class_i = self.class_stats[class_labels[i]]\n num_to_generate = n_majority - n_class_i\n\n num_to_gen_to_all = i * n_majority - n_class_i\n\n params['proportion'] = num_to_generate/num_to_gen_to_all\n\n # instantiating new oversampling object with the proper proportion\n # parameter\n oversampler = self.oversampler.__class__(**params)\n\n # executing the sampling\n X_samp, y_samp = oversampler.sample(X_training, y_training)\n\n # adding the newly oversampled minority class to the majority data\n X_maj = np.vstack([X_maj, X_samp[y_samp == 1]])\n\n # registaring the newly oversampled minority class in the output\n # set\n result_mask = y_samp[len(X_training):] == 1\n results[class_labels[i]] = X_samp[len(X_training):][result_mask]\n\n # constructing the output set\n X_final = results[class_labels[1]]\n y_final = np.repeat(class_labels[1], len(results[class_labels[1]]))\n\n for i in range(2, len(class_labels)):\n X_final = np.vstack([X_final, results[class_labels[i]]])\n y_new = np.repeat(class_labels[i], len(results[class_labels[i]]))\n y_final = np.hstack([y_final, y_new])\n\n return np.vstack([X, X_final]), np.hstack([y, y_final])\n\n def sample(self, X, 
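# --- Illustrative sketch (not part of the library): the successive variant's
# proportion above; the i classes processed so far (each already topped up to
# the majority cardinality) are folded into the "majority" side before class i
# is sampled. Class sizes are hypothetical and sorted by decreasing size.
n_majority = 100
class_sizes_sorted = [100, 60, 20]

for i in range(1, len(class_sizes_sorted)):
    n_class_i = class_sizes_sorted[i]
    num_to_generate = n_majority - n_class_i
    num_to_gen_to_all = i * n_majority - n_class_i
    print(i, num_to_generate / num_to_gen_to_all)     # 1.0 for i=1, ~0.444 for i=2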
y):\n \"\"\"\n Does the sample generation according to the oversampling strategy.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n if self.strategy == \"eq_1_vs_many_successive\":\n return self.sample_equalize_1_vs_many_successive(X, y)\n elif self.strategy == \"equalize_1_vs_many\":\n return self.sample_equalize_1_vs_many(X, y)\n else:\n message = \"Multiclass oversampling startegy %s not implemented.\"\n message = message % self.strategy\n raise ValueError(message)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the multiclass oversampling object\n \"\"\"\n return {'oversampler': self.oversampler, 'strategy': self.strategy}\n\n\nclass OversamplingClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"\n This class wraps an oversampler and a classifier, making it compatible\n with sklearn based pipelines.\n \"\"\"\n\n def __init__(self, oversampler, classifier):\n \"\"\"\n Constructor of the wrapper.\n\n Args:\n oversampler (obj): an oversampler object\n classifier (obj): an sklearn-compatible classifier\n \"\"\"\n\n self.oversampler = oversampler\n self.classifier = classifier\n\n def fit(self, X, y=None):\n \"\"\"\n Carries out oversampling and fits the classifier.\n\n Args:\n X (np.ndarray): feature vectors\n y (np.array): target values\n\n Returns:\n obj: the object itself\n \"\"\"\n\n X_samp, y_samp = self.oversampler.sample(X, y)\n self.classifier.fit(X_samp, y_samp)\n\n return self\n\n def predict(self, X):\n \"\"\"\n Carries out the predictions.\n\n Args:\n X (np.ndarray): feature vectors\n \"\"\"\n\n return self.classifier.predict(X)\n\n def predict_proba(self, X):\n \"\"\"\n Carries out the predictions with probability estimations.\n\n Args:\n X (np.ndarray): feature vectors\n \"\"\"\n\n return self.classifier.predict_proba(X)\n\n def get_params(self, deep=True):\n \"\"\"\n Returns the dictionary of parameters.\n\n Args:\n deep (bool): wether to return parameters with deep discovery\n\n Returns:\n dict: the dictionary of parameters\n \"\"\"\n\n return {'oversampler': self.oversampler, 'classifier': self.classifier}\n\n def set_params(self, **parameters):\n \"\"\"\n Sets the parameters.\n\n Args:\n parameters (dict): the parameters to set.\n\n Returns:\n obj: the object itself\n \"\"\"\n\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n\n return self\n\n\nclass MLPClassifierWrapper:\n \"\"\"\n Wrapper over MLPClassifier of sklearn to provide easier parameterization\n \"\"\"\n\n def __init__(self,\n activation='relu',\n hidden_layer_fraction=0.1,\n alpha=0.0001,\n random_state=None):\n \"\"\"\n Constructor of the MLPClassifier\n\n Args:\n activation (str): name of the activation function\n hidden_layer_fraction (float): fraction of the hidden neurons of\n the number of input dimensions\n alpha (float): alpha parameter of the MLP classifier\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n \"\"\"\n self.activation = activation\n self.hidden_layer_fraction = hidden_layer_fraction\n self.alpha = alpha\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n Fit the model to the data\n\n Args:\n X (np.ndarray): features\n y (np.array): target labels\n\n Returns:\n obj: the MLPClassifierWrapper object\n \"\"\"\n hidden_layer_size = max([1, int(len(X[0])*self.hidden_layer_fraction)])\n self.model = MLPClassifier(activation=self.activation,\n 
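# --- Illustrative sketch (not part of the library docs): using the
# OversamplingClassifier wrapper above in a standard sklearn workflow.
# The dataset, SMOTE parameters and DecisionTreeClassifier are arbitrary
# stand-ins chosen for this example.
import smote_variants as sv
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3,
                                          random_state=5, stratify=y)

model = sv.OversamplingClassifier(sv.SMOTE(random_state=5),
                                  DecisionTreeClassifier(random_state=5))
model.fit(X_tr, y_tr)
auc = roc_auc_score(y_te, model.predict_proba(X_te)[:, 1])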
hidden_layer_sizes=(hidden_layer_size,),\n alpha=self.alpha,\n random_state=self.random_state).fit(X, y)\n return self\n\n def predict(self, X):\n \"\"\"\n Predicts the labels of the unseen data\n\n Args:\n X (np.ndarray): unseen features\n\n Returns:\n np.array: predicted labels\n \"\"\"\n return self.model.predict(X)\n\n def predict_proba(self, X):\n \"\"\"\n Predicts the class probabilities of the unseen data\n\n Args:\n X (np.ndarray): unseen features\n\n Returns:\n np.matrix: predicted class probabilities\n \"\"\"\n return self.model.predict_proba(X)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the classifier.\n\n Returns:\n dict: the parameters of the object\n \"\"\"\n return {'activation': self.activation,\n 'hidden_layer_fraction': self.hidden_layer_fraction,\n 'alpha': self.alpha,\n 'random_state': self.random_state}\n\n def copy(self):\n \"\"\"\n Creates a copy of the classifier.\n\n Returns:\n obj: a copy of the classifier\n \"\"\"\n return MLPClassifierWrapper(**self.get_params())\n\n\nclass Folding():\n \"\"\"\n Cache-able folding of dataset for cross-validation\n \"\"\"\n\n def __init__(self, dataset, validator, cache_path=None, random_state=None):\n \"\"\"\n Constructor of Folding object\n\n Args:\n dataset (dict): dataset dictionary with keys 'data', 'target'\n and 'DESCR'\n validator (obj): cross-validator object\n cache_path (str): path to cache directory\n random_state (int/np.random.RandomState/None): initializer of\n the random state\n \"\"\"\n self.dataset = dataset\n self.db_name = self.dataset['name']\n self.validator = validator\n self.cache_path = cache_path\n self.filename = 'folding_' + self.db_name + '.pickle'\n self.db_size = len(dataset['data'])\n self.db_n_attr = len(dataset['data'][0])\n self.imbalanced_ratio = np.sum(\n self.dataset['target'] == 0)/np.sum(self.dataset['target'] == 1)\n self.random_state = random_state\n\n def do_folding(self):\n \"\"\"\n Does the folding or reads it from file if already available\n\n Returns:\n list(tuple): list of tuples of X_train, y_train, X_test, y_test\n objects\n \"\"\"\n\n self.validator.random_state = self.random_state\n\n if not hasattr(self, 'folding'):\n cond_cache_none = self.cache_path is None\n if not cond_cache_none:\n filename = os.path.join(self.cache_path, self.filename)\n cond_file_not_exists = not os.path.isfile(filename)\n else:\n cond_file_not_exists = False\n\n if cond_cache_none or cond_file_not_exists:\n _logger.info(self.__class__.__name__ +\n (\" doing folding %s\" % self.filename))\n\n self.folding = {}\n self.folding['folding'] = []\n self.folding['db_size'] = len(self.dataset['data'])\n self.folding['db_n_attr'] = len(self.dataset['data'][0])\n n_maj = np.sum(self.dataset['target'] == 0)\n n_min = np.sum(self.dataset['target'] == 1)\n self.folding['imbalanced_ratio'] = n_maj / n_min\n\n X = self.dataset['data']\n y = self.dataset['target']\n\n data = self.dataset['data']\n target = self.dataset['target']\n\n for train, test in self.validator.split(data, target, target):\n folding = (X[train], y[train], X[test], y[test])\n self.folding['folding'].append(folding)\n if self.cache_path is not None:\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n random_filename = np.random.randint(1000000)\n random_filename = str(random_filename) + '.pickle'\n random_filename = os.path.join(self.cache_path,\n random_filename)\n pickle.dump(self.folding, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, 
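# --- Illustrative sketch (not part of the library): the cache-write pattern
# used by Folding.do_folding above -- pickle into a randomly named temporary
# file first, then os.rename onto the final name, so that a concurrent worker
# never reads a half-written pickle.
import os
import pickle
import numpy as np


def dump_then_rename(obj, cache_path, filename):
    tmp = os.path.join(cache_path, '%d.pickle' % np.random.randint(1000000))
    with open(tmp, 'wb') as f:
        pickle.dump(obj, f)
    # rename within the same filesystem is atomic on POSIX systems
    os.rename(tmp, os.path.join(cache_path, filename))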
self.filename))\n else:\n _logger.info(self.__class__.__name__ +\n (\" reading from file %s\" % self.filename))\n self.folding = pickle.load(\n open(os.path.join(self.cache_path, self.filename), \"rb\"))\n return self.folding\n\n def get_params(self, deep=False):\n return {'db_name': self.db_name}\n\n def descriptor(self):\n return str(self.get_params())\n\n\nclass Sampling():\n \"\"\"\n Cache-able sampling of dataset folds\n \"\"\"\n\n def __init__(self,\n folding,\n sampler,\n sampler_parameters,\n scaler,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n folding (obj): Folding object\n sampler (class): class of a sampler object\n sampler_parameters (dict): a parameter combination for the sampler\n object\n scaler (obj): scaler object\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n \"\"\"\n self.folding = folding\n self.db_name = folding.db_name\n self.sampler = sampler\n self.sampler_parameters = sampler_parameters\n self.sampler_parameters['random_state'] = random_state\n self.scaler = scaler\n self.cache_path = folding.cache_path\n self.filename = self.standardized_filename('sampling')\n self.random_state = random_state\n\n def standardized_filename(self,\n prefix,\n db_name=None,\n sampler=None,\n sampler_parameters=None):\n \"\"\"\n standardizes the filename\n\n Args:\n filename (str): filename\n\n Returns:\n str: standardized name\n \"\"\"\n import hashlib\n\n db_name = (db_name or self.db_name)\n\n sampler = (sampler or self.sampler)\n sampler = sampler.__name__\n sampler_parameters = sampler_parameters or self.sampler_parameters\n _logger.info(str(sampler_parameters))\n from collections import OrderedDict\n sampler_parameters_ordered = OrderedDict()\n for k in sorted(list(sampler_parameters.keys())):\n sampler_parameters_ordered[k] = sampler_parameters[k]\n\n message = \" sampler parameter string \"\n message = message + str(sampler_parameters_ordered)\n _logger.info(self.__class__.__name__ + message)\n sampler_parameter_str = hashlib.md5(\n str(sampler_parameters_ordered).encode('utf-8')).hexdigest()\n\n filename = '_'.join(\n [prefix, db_name, sampler, sampler_parameter_str]) + '.pickle'\n filename = re.sub('[\"\\\\,:(){}]', '', filename)\n filename = filename.replace(\"'\", '')\n filename = filename.replace(\": \", \"_\")\n filename = filename.replace(\" \", \"_\")\n filename = filename.replace(\"\\n\", \"_\")\n\n return filename\n\n def cache_sampling(self):\n try:\n import mkl\n mkl.set_num_threads(1)\n _logger.info(self.__class__.__name__ +\n (\" mkl thread number set to 1 successfully\"))\n except Exception as e:\n _logger.info(self.__class__.__name__ +\n (\" setting mkl thread number didn't succeed\"))\n _logger.info(str(e))\n\n if not os.path.isfile(os.path.join(self.cache_path, self.filename)):\n # if the sampled dataset does not exist\n sampler_categories = self.sampler.categories\n is_extensive = OverSampling.cat_extensive in sampler_categories\n has_proportion = 'proportion' in self.sampler_parameters\n higher_prop_sampling_avail = None\n\n if is_extensive and has_proportion:\n proportion = self.sampler_parameters['proportion']\n all_pc = self.sampler.parameter_combinations()\n all_proportions = np.unique([p['proportion'] for p in all_pc])\n all_proportions = all_proportions[all_proportions > proportion]\n\n for p in all_proportions:\n tmp_par = self.sampler_parameters.copy()\n tmp_par['proportion'] = p\n tmp_filename = self.standardized_filename(\n 'sampling', self.db_name, self.sampler, tmp_par)\n\n 
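# --- Illustrative sketch (not part of the library): the cache-key scheme of
# standardized_filename above. Parameters are key-ordered so that logically
# equal dicts produce the same string, then md5-hashed to keep names short;
# the additional character stripping done by the library is omitted here.
import hashlib
from collections import OrderedDict


def cache_filename(prefix, db_name, sampler_name, params):
    ordered = OrderedDict((k, params[k]) for k in sorted(params))
    digest = hashlib.md5(str(ordered).encode('utf-8')).hexdigest()
    return '_'.join([prefix, db_name, sampler_name, digest]) + '.pickle'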
filename = os.path.join(self.cache_path, tmp_filename)\n if os.path.isfile(filename):\n higher_prop_sampling_avail = (p, tmp_filename)\n break\n\n if (not is_extensive or not has_proportion or\n (is_extensive and has_proportion and\n higher_prop_sampling_avail is None)):\n _logger.info(self.__class__.__name__ + \" doing sampling\")\n begin = time.time()\n sampling = []\n folds = self.folding.do_folding()\n for X_train, y_train, X_test, y_test in folds['folding']:\n s = self.sampler(**self.sampler_parameters)\n\n if self.scaler is not None:\n print(self.scaler.__class__.__name__)\n X_train = self.scaler.fit_transform(X_train, y_train)\n X_samp, y_samp = s.sample_with_timing(X_train, y_train)\n\n if hasattr(s, 'transform'):\n X_test_trans = s.preprocessing_transform(X_test)\n else:\n X_test_trans = X_test.copy()\n\n if self.scaler is not None:\n X_samp = self.scaler.inverse_transform(X_samp)\n\n sampling.append((X_samp, y_samp, X_test_trans, y_test))\n runtime = time.time() - begin\n else:\n higher_prop, higher_prop_filename = higher_prop_sampling_avail\n message = \" reading and resampling from file %s to %s\"\n message = message % (higher_prop_filename, self.filename)\n _logger.info(self.__class__.__name__ + message)\n filename = os.path.join(self.cache_path, higher_prop_filename)\n tmp_results = pickle.load(open(filename, 'rb'))\n tmp_sampling = tmp_results['sampling']\n tmp_runtime = tmp_results['runtime']\n\n sampling = []\n folds = self.folding.do_folding()\n nums = [len(X_train) for X_train, _, _, _ in folds['folding']]\n i = 0\n for X_train, y_train, X_test, y_test in tmp_sampling:\n new_num = (len(X_train) - nums[i])/higher_prop*proportion\n new_num = int(new_num)\n offset = nums[i] + new_num\n X_offset = X_train[:offset]\n y_offset = y_train[:offset]\n sampling.append((X_offset, y_offset, X_test, y_test))\n i = i + 1\n runtime = tmp_runtime/p*proportion\n\n results = {}\n results['sampling'] = sampling\n results['runtime'] = runtime\n results['db_size'] = folds['db_size']\n results['db_n_attr'] = folds['db_n_attr']\n results['imbalanced_ratio'] = folds['imbalanced_ratio']\n\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n\n random_filename = np.random.randint(1000000)\n random_filename = str(random_filename) + '.pickle'\n random_filename = os.path.join(self.cache_path, random_filename)\n pickle.dump(results, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, self.filename))\n\n def do_sampling(self):\n self.cache_sampling()\n results = pickle.load(\n open(os.path.join(self.cache_path, self.filename), 'rb'))\n return results\n\n def get_params(self, deep=False):\n return {'folding': self.folding.get_params(),\n 'sampler_name': self.sampler.__name__,\n 'sampler_parameters': self.sampler_parameters}\n\n def descriptor(self):\n return str(self.get_params())\n\n\nclass Evaluation():\n \"\"\"\n Cache-able evaluation of classifier on sampling\n \"\"\"\n\n def __init__(self,\n sampling,\n classifiers,\n n_threads=None,\n random_state=None):\n \"\"\"\n Constructor of an Evaluation object\n\n Args:\n sampling (obj): Sampling object\n classifiers (list(obj)): classifier objects\n n_threads (int/None): number of threads\n random_state (int/np.random.RandomState/None): random state\n initializer\n \"\"\"\n self.sampling = sampling\n self.classifiers = classifiers\n self.n_threads = n_threads\n self.cache_path = sampling.cache_path\n self.filename = self.sampling.standardized_filename('eval')\n self.random_state = 
random_state\n\n self.labels = []\n for i in range(len(classifiers)):\n from collections import OrderedDict\n sampling_parameters = OrderedDict()\n sp = self.sampling.sampler_parameters\n for k in sorted(list(sp.keys())):\n sampling_parameters[k] = sp[k]\n cp = classifiers[i].get_params()\n classifier_parameters = OrderedDict()\n for k in sorted(list(cp.keys())):\n classifier_parameters[k] = cp[k]\n\n label = str((self.sampling.db_name, sampling_parameters,\n classifiers[i].__class__.__name__,\n classifier_parameters))\n self.labels.append(label)\n\n print(self.labels)\n\n def calculate_metrics(self, all_pred, all_test, all_folds):\n \"\"\"\n Calculates metrics of binary classifiction\n\n Args:\n all_pred (np.matrix): predicted probabilities\n all_test (np.matrix): true labels\n\n Returns:\n dict: all metrics of binary classification\n \"\"\"\n\n results = {}\n if all_pred is not None:\n all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_pred)\n\n results['tp'] = np.sum(np.logical_and(\n np.equal(all_test, all_pred_labels), (all_test == 1)))\n results['tn'] = np.sum(np.logical_and(\n np.equal(all_test, all_pred_labels), (all_test == 0)))\n results['fp'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_test, all_pred_labels)), (all_test == 0)))\n results['fn'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_test, all_pred_labels)), (all_test == 1)))\n results['p'] = results['tp'] + results['fn']\n results['n'] = results['fp'] + results['tn']\n results['acc'] = (results['tp'] + results['tn']) / \\\n (results['p'] + results['n'])\n results['sens'] = results['tp']/results['p']\n results['spec'] = results['tn']/results['n']\n results['ppv'] = results['tp']/(results['tp'] + results['fp'])\n results['npv'] = results['tn']/(results['tn'] + results['fn'])\n results['fpr'] = 1.0 - results['spec']\n results['fdr'] = 1.0 - results['ppv']\n results['fnr'] = 1.0 - results['sens']\n results['bacc'] = (results['tp']/results['p'] +\n results['tn']/results['n'])/2.0\n results['gacc'] = np.sqrt(\n results['tp']/results['p']*results['tn']/results['n'])\n results['f1'] = 2*results['tp'] / \\\n (2*results['tp'] + results['fp'] + results['fn'])\n mcc_num = results['tp']*results['tn'] - results['fp']*results['fn']\n mcc_denom_0 = (results['tp'] + results['fp'])\n mcc_denom_1 = (results['tp'] + results['fn'])\n mcc_denom_2 = (results['tn'] + results['fp'])\n mcc_denom_3 = (results['tn'] + results['fn'])\n mcc_denom = mcc_denom_0 * mcc_denom_1 * mcc_denom_2*mcc_denom_3\n results['mcc'] = mcc_num/np.sqrt(mcc_denom)\n results['l'] = (results['p'] + results['n']) * \\\n np.log(results['p'] + results['n'])\n tp_fp = (results['tp'] + results['fp'])\n tp_fn = (results['tp'] + results['fn'])\n tn_fp = (results['fp'] + results['tn'])\n tn_fn = (results['fn'] + results['tn'])\n results['ltp'] = results['tp']*np.log(results['tp']/(tp_fp*tp_fn))\n results['lfp'] = results['fp']*np.log(results['fp']/(tp_fp*tn_fp))\n results['lfn'] = results['fn']*np.log(results['fn']/(tp_fn*tn_fn))\n results['ltn'] = results['tn']*np.log(results['tn']/(tn_fp*tn_fn))\n results['lp'] = results['p'] * \\\n np.log(results['p']/(results['p'] + results['n']))\n results['ln'] = results['n'] * \\\n np.log(results['n']/(results['p'] + results['n']))\n uc_num = (results['l'] + results['ltp'] + results['lfp'] +\n results['lfn'] + results['ltn'])\n uc_denom = (results['l'] + results['lp'] + results['ln'])\n results['uc'] = uc_num/uc_denom\n results['informedness'] = results['sens'] + results['spec'] - 1.0\n 
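# --- Illustrative sketch (not part of the library): the confusion-matrix
# part of calculate_metrics above, for a binary problem where label 1 is the
# minority (positive) class and proba holds predicted class probabilities.
import numpy as np


def basic_binary_metrics(y_true, proba):
    y_true = np.asarray(y_true)
    y_pred = np.argmax(proba, axis=1)
    tp = np.sum((y_true == 1) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    p, n = tp + fn, fp + tn
    return {'acc': (tp + tn) / (p + n),
            'sens': tp / p,
            'spec': tn / n,
            'bacc': (tp / p + tn / n) / 2.0,
            'gacc': np.sqrt(tp / p * tn / n),
            'f1': 2 * tp / (2 * tp + fp + fn)}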
results['markedness'] = results['ppv'] + results['npv'] - 1.0\n results['log_loss'] = log_loss(all_test, all_pred)\n results['auc'] = roc_auc_score(all_test, all_pred[:, 1])\n aucs = [roc_auc_score(all_test[all_folds == i],\n all_pred[all_folds == i, 1])\n for i in range(np.max(all_folds)+1)]\n results['auc_mean'] = np.mean(aucs)\n results['auc_std'] = np.std(aucs)\n test_labels, preds = zip(\n *sorted(zip(all_test, all_pred[:, 1]), key=lambda x: -x[1]))\n test_labels = np.array(test_labels)\n th = int(0.2*len(test_labels))\n results['p_top20'] = np.sum(test_labels[:th] == 1)/th\n results['brier'] = np.mean((all_pred[:, 1] - all_test)**2)\n else:\n results['tp'] = 0\n results['tn'] = 0\n results['fp'] = 0\n results['fn'] = 0\n results['p'] = 0\n results['n'] = 0\n results['acc'] = 0\n results['sens'] = 0\n results['spec'] = 0\n results['ppv'] = 0\n results['npv'] = 0\n results['fpr'] = 1\n results['fdr'] = 1\n results['fnr'] = 1\n results['bacc'] = 0\n results['gacc'] = 0\n results['f1'] = 0\n results['mcc'] = np.nan\n results['l'] = np.nan\n results['ltp'] = np.nan\n results['lfp'] = np.nan\n results['lfn'] = np.nan\n results['ltn'] = np.nan\n results['lp'] = np.nan\n results['ln'] = np.nan\n results['uc'] = np.nan\n results['informedness'] = 0\n results['markedness'] = 0\n results['log_loss'] = np.nan\n results['auc'] = 0\n results['auc_mean'] = 0\n results['auc_std'] = 0\n results['p_top20'] = 0\n results['brier'] = 1\n\n return results\n\n def do_evaluation(self):\n \"\"\"\n Does the evaluation or reads it from file\n\n Returns:\n dict: all metrics\n \"\"\"\n\n if self.n_threads is not None:\n try:\n import mkl\n mkl.set_num_threads(self.n_threads)\n message = \" mkl thread number set to %d successfully\"\n message = message % self.n_threads\n _logger.info(self.__class__.__name__ + message)\n except Exception as e:\n message = \" setting mkl thread number didn't succeed\"\n _logger.info(self.__class__.__name__ + message)\n\n evaluations = {}\n if os.path.isfile(os.path.join(self.cache_path, self.filename)):\n evaluations = pickle.load(\n open(os.path.join(self.cache_path, self.filename), 'rb'))\n\n already_evaluated = np.array([li in evaluations for li in self.labels])\n\n if not np.all(already_evaluated):\n samp = self.sampling.do_sampling()\n else:\n return list(evaluations.values())\n\n # setting random states\n for i in range(len(self.classifiers)):\n clf_params = self.classifiers[i].get_params()\n if 'random_state' in clf_params:\n clf_params['random_state'] = self.random_state\n self.classifiers[i] = self.classifiers[i].__class__(\n **clf_params)\n if isinstance(self.classifiers[i], CalibratedClassifierCV):\n clf_params = self.classifiers[i].base_estimator.get_params()\n clf_params['random_state'] = self.random_state\n class_inst = self.classifiers[i].base_estimator.__class__\n new_inst = class_inst(**clf_params)\n self.classifiers[i].base_estimator = new_inst\n\n for i in range(len(self.classifiers)):\n if not already_evaluated[i]:\n message = \" do the evaluation %s %s %s\"\n message = message % (self.sampling.db_name,\n self.sampling.sampler.__name__,\n self.classifiers[i].__class__.__name__)\n _logger.info(self.__class__.__name__ + message)\n all_preds, all_tests, all_folds = [], [], []\n minority_class_label = None\n majority_class_label = None\n fold_idx = -1\n for X_train, y_train, X_test, y_test in samp['sampling']:\n fold_idx += 1\n\n # X_train[X_train == np.inf]= 0\n # X_train[X_train == -np.inf]= 0\n # X_test[X_test == np.inf]= 0\n # X_test[X_test == -np.inf]= 0\n\n 
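# --- Illustrative sketch (not part of the library): the two ranking and
# calibration scores computed above. p_top20 is the fraction of true
# positives among the 20% of samples ranked highest by predicted minority
# probability, and the Brier score is the mean squared error of those
# probabilities against the 0/1 labels.
import numpy as np


def p_top20(y_true, pos_proba):
    y_true, pos_proba = np.asarray(y_true), np.asarray(pos_proba)
    order = np.argsort(-pos_proba)          # descending by predicted probability
    th = int(0.2 * len(y_true))
    return np.sum(y_true[order][:th] == 1) / th


def brier_score(y_true, pos_proba):
    return np.mean((np.asarray(pos_proba) - np.asarray(y_true)) ** 2)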
class_labels = np.unique(y_train)\n min_class_size = np.min(\n [np.sum(y_train == c) for c in class_labels])\n\n ss = StandardScaler()\n X_train_trans = ss.fit_transform(X_train)\n nonzero_var_idx = np.where(ss.var_ > 1e-8)[0]\n X_test_trans = ss.transform(X_test)\n\n enough_minority_samples = min_class_size > 4\n y_train_big_enough = len(y_train) > 4\n two_classes = len(class_labels) > 1\n at_least_one_feature = (len(nonzero_var_idx) > 0)\n\n if not enough_minority_samples:\n message = \" not enough minority samples: %d\"\n message = message % min_class_size\n _logger.warning(\n self.__class__.__name__ + message)\n elif not y_train_big_enough:\n message = (\" number of minority training samples is \"\n \"not enough: %d\")\n message = message % len(y_train)\n _logger.warning(self.__class__.__name__ + message)\n elif not two_classes:\n message = \" there is only 1 class in training data\"\n _logger.warning(self.__class__.__name__ + message)\n elif not at_least_one_feature:\n _logger.warning(self.__class__.__name__ +\n (\" no information in features\"))\n else:\n all_tests.append(y_test)\n if (minority_class_label is None or\n majority_class_label is None):\n class_labels = np.unique(y_train)\n n_0 = sum(class_labels[0] == y_test)\n n_1 = sum(class_labels[1] == y_test)\n if n_0 < n_1:\n minority_class_label = int(class_labels[0])\n majority_class_label = int(class_labels[1])\n else:\n minority_class_label = int(class_labels[1])\n majority_class_label = int(class_labels[0])\n\n X_fit = X_train_trans[:, nonzero_var_idx]\n self.classifiers[i].fit(X_fit, y_train)\n clf = self.classifiers[i]\n X_pred = X_test_trans[:, nonzero_var_idx]\n pred = clf.predict_proba(X_pred)\n all_preds.append(pred)\n all_folds.append(\n np.repeat(fold_idx, len(all_preds[-1])))\n\n if len(all_tests) > 0:\n all_preds = np.vstack(all_preds)\n all_tests = np.hstack(all_tests)\n all_folds = np.hstack(all_folds)\n\n evaluations[self.labels[i]] = self.calculate_metrics(\n all_preds, all_tests, all_folds)\n else:\n evaluations[self.labels[i]] = self.calculate_metrics(\n None, None, None)\n\n evaluations[self.labels[i]]['runtime'] = samp['runtime']\n sampler_name = self.sampling.sampler.__name__\n evaluations[self.labels[i]]['sampler'] = sampler_name\n clf_name = self.classifiers[i].__class__.__name__\n evaluations[self.labels[i]]['classifier'] = clf_name\n sampler_parameters = self.sampling.sampler_parameters.copy()\n\n evaluations[self.labels[i]]['sampler_parameters'] = str(\n sampler_parameters)\n evaluations[self.labels[i]]['classifier_parameters'] = str(\n self.classifiers[i].get_params())\n evaluations[self.labels[i]]['sampler_categories'] = str(\n self.sampling.sampler.categories)\n evaluations[self.labels[i]\n ]['db_name'] = self.sampling.folding.db_name\n evaluations[self.labels[i]]['db_size'] = samp['db_size']\n evaluations[self.labels[i]]['db_n_attr'] = samp['db_n_attr']\n evaluations[self.labels[i]\n ]['imbalanced_ratio'] = samp['imbalanced_ratio']\n\n if not np.all(already_evaluated):\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n random_filename = os.path.join(self.cache_path, str(\n np.random.randint(1000000)) + '.pickle')\n pickle.dump(evaluations, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, self.filename))\n\n return list(evaluations.values())\n\n\ndef trans(X):\n \"\"\"\n Transformation function used to aggregate the evaluation results.\n\n Args:\n X (pd.DataFrame): a grouping of a data frame containing evaluation\n 
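# --- Illustrative sketch (not part of the library): the per-fold
# preprocessing used above -- standardize on the (oversampled) training fold,
# drop features whose training variance is numerically zero, and apply the
# same transformation and column selection to the test fold.
import numpy as np
from sklearn.preprocessing import StandardScaler


def scale_and_drop_constant_features(X_train, X_test):
    ss = StandardScaler()
    X_train_trans = ss.fit_transform(X_train)
    keep = np.where(ss.var_ > 1e-8)[0]
    X_test_trans = ss.transform(X_test)
    return X_train_trans[:, keep], X_test_trans[:, keep]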
results\n \"\"\"\n auc_std = X.iloc[np.argmax(X['auc_mean'].values)]['auc_std']\n cp_auc = X.sort_values('auc')['classifier_parameters'].iloc[-1]\n cp_acc = X.sort_values('acc')['classifier_parameters'].iloc[-1]\n cp_gacc = X.sort_values('gacc')['classifier_parameters'].iloc[-1]\n cp_f1 = X.sort_values('f1')['classifier_parameters'].iloc[-1]\n cp_p_top20 = X.sort_values('p_top20')['classifier_parameters'].iloc[-1]\n cp_brier = X.sort_values('brier')['classifier_parameters'].iloc[-1]\n sp_auc = X.sort_values('auc')['sampler_parameters'].iloc[-1]\n sp_acc = X.sort_values('acc')['sampler_parameters'].iloc[-1]\n sp_gacc = X.sort_values('gacc')['sampler_parameters'].iloc[-1]\n sp_f1 = X.sort_values('f1')['sampler_parameters'].iloc[-1]\n sp_p_top20 = X.sort_values('p_top20')['sampler_parameters'].iloc[-1]\n sp_brier = X.sort_values('p_top20')['sampler_parameters'].iloc[0]\n\n return pd.DataFrame({'auc': np.max(X['auc']),\n 'auc_mean': np.max(X['auc_mean']),\n 'auc_std': auc_std,\n 'brier': np.min(X['brier']),\n 'acc': np.max(X['acc']),\n 'f1': np.max(X['f1']),\n 'p_top20': np.max(X['p_top20']),\n 'gacc': np.max(X['gacc']),\n 'runtime': np.mean(X['runtime']),\n 'db_size': X['db_size'].iloc[0],\n 'db_n_attr': X['db_n_attr'].iloc[0],\n 'imbalanced_ratio': X['imbalanced_ratio'].iloc[0],\n 'sampler_categories': X['sampler_categories'].iloc[0],\n 'classifier_parameters_auc': cp_auc,\n 'classifier_parameters_acc': cp_acc,\n 'classifier_parameters_gacc': cp_gacc,\n 'classifier_parameters_f1': cp_f1,\n 'classifier_parameters_p_top20': cp_p_top20,\n 'classifier_parameters_brier': cp_brier,\n 'sampler_parameters_auc': sp_auc,\n 'sampler_parameters_acc': sp_acc,\n 'sampler_parameters_gacc': sp_gacc,\n 'sampler_parameters_f1': sp_f1,\n 'sampler_parameters_p_top20': sp_p_top20,\n 'sampler_parameters_brier': sp_brier,\n }, index=[0])\n\n\ndef _clone_classifiers(classifiers):\n \"\"\"\n Clones a set of classifiers\n\n Args:\n classifiers (list): a list of classifier objects\n \"\"\"\n results = []\n for c in classifiers:\n if isinstance(c, MLPClassifierWrapper):\n results.append(c.copy())\n else:\n results.append(clone(c))\n\n return results\n\n\ndef _cache_samplings(folding,\n samplers,\n scaler,\n max_n_sampler_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n\n \"\"\"\n _logger.info(\"create sampling objects, random_state: %s\" %\n str(random_state or \"\"))\n sampling_objs = []\n\n random_state_init = random_state\n random_state = np.random.RandomState(random_state_init)\n\n _logger.info(\"samplers: %s\" % str(samplers))\n for s in samplers:\n sampling_par_comb = s.parameter_combinations()\n _logger.info(sampling_par_comb)\n domain = np.array(list(range(len(sampling_par_comb))))\n n_random = min([len(sampling_par_comb), max_n_sampler_par_comb])\n random_indices = random_state.choice(domain, n_random, replace=False)\n _logger.info(\"random_indices: %s\" % random_indices)\n sampling_par_comb = [sampling_par_comb[i] for i in random_indices]\n _logger.info(sampling_par_comb)\n\n for spc in sampling_par_comb:\n sampling_objs.append(Sampling(folding,\n s,\n spc,\n scaler,\n random_state_init))\n\n # sorting sampling objects to optimize execution\n def key(x):\n if (isinstance(x.sampler, ADG) or isinstance(x.sampler, AMSCO) or\n isinstance(x.sampler, DSRBF)):\n if 'proportion' in x.sampler_parameters:\n return 30 + x.sampler_parameters['proportion']\n else:\n return 30\n elif 'proportion' in x.sampler_parameters:\n return x.sampler_parameters['proportion']\n elif OverSampling.cat_memetic in 
x.sampler.categories:\n return 20\n else:\n return 10\n\n sampling_objs = list(reversed(sorted(sampling_objs, key=key)))\n\n # executing sampling in parallel\n _logger.info(\"executing %d sampling in parallel\" % len(sampling_objs))\n Parallel(n_jobs=n_jobs, batch_size=1)(delayed(s.cache_sampling)()\n for s in sampling_objs)\n\n return sampling_objs\n\n\ndef _cache_evaluations(sampling_objs,\n classifiers,\n n_jobs=1,\n random_state=None):\n # create evaluation objects\n _logger.info(\"create classifier jobs\")\n evaluation_objs = []\n\n num_threads = None if n_jobs is None or n_jobs == 1 else 1\n\n for s in sampling_objs:\n evaluation_objs.append(Evaluation(s, _clone_classifiers(\n classifiers), num_threads, random_state))\n\n _logger.info(\"executing %d evaluation jobs in parallel\" %\n (len(evaluation_objs)))\n # execute evaluation in parallel\n evals = Parallel(n_jobs=n_jobs, batch_size=1)(\n delayed(e.do_evaluation)() for e in evaluation_objs)\n\n return evals\n\n\ndef _read_db_results(cache_path_db):\n results = []\n evaluation_files = glob.glob(os.path.join(cache_path_db, 'eval*.pickle'))\n\n for f in evaluation_files:\n eval_results = pickle.load(open(f, 'rb'))\n results.append(list(eval_results.values()))\n\n return results\n\n\ndef read_oversampling_results(datasets, cache_path=None, all_results=False):\n \"\"\"\n Reads the results of the evaluation\n\n Args:\n datasets (list): list of datasets and/or dataset loaders - a dataset\n is a dict with 'data', 'target' and 'name' keys\n cache_path (str): path to a cache directory\n all_results (bool): True to return all results, False to return an\n aggregation\n\n Returns:\n pd.DataFrame: all results or the aggregated results if all_results is\n False\n \"\"\"\n\n results = []\n for dataset_spec in datasets:\n\n # loading dataset if needed and determining dataset name\n if not isinstance(dataset_spec, dict):\n dataset = dataset_spec()\n else:\n dataset = dataset_spec\n\n if 'name' in dataset:\n dataset_name = dataset['name']\n else:\n dataset_name = dataset_spec.__name__\n\n dataset['name'] = dataset_name\n\n # determining dataset specific cache path\n cache_path_db = os.path.join(cache_path, dataset_name)\n\n # reading the results\n res = _read_db_results(cache_path_db)\n\n # concatenating the results\n _logger.info(\"concatenating results\")\n db_res = [pd.DataFrame(r) for r in res]\n db_res = pd.concat(db_res).reset_index(drop=True)\n\n _logger.info(\"aggregating the results\")\n if all_results is False:\n db_res = db_res.groupby(by=['db_name', 'classifier', 'sampler'])\n db_res.apply(trans).reset_index().drop('level_3', axis=1)\n\n results.append(db_res)\n\n return pd.concat(results).reset_index(drop=True)\n\n\ndef evaluate_oversamplers(datasets,\n samplers,\n classifiers,\n cache_path,\n validator=RepeatedStratifiedKFold(\n n_splits=5, n_repeats=3),\n scaler=None,\n all_results=False,\n remove_cache=False,\n max_samp_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques using various classifiers on various\n datasets\n\n Args:\n datasets (list): list of datasets and/or dataset loaders - a dataset\n is a dict with 'data', 'target' and 'name' keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n cache_path (str): path to a cache directory\n validator (obj): validator object\n scaler (obj): scaler object\n all_results (bool): True to return all results, False to return an\n aggregation\n remove_cache (bool): True to remove sampling objects 
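# --- Illustrative sketch (not part of the library): the joblib pattern used
# by _cache_samplings and _cache_evaluations above, assuming the standard
# joblib package. Each cache_sampling/do_evaluation call becomes one job;
# batch_size=1 keeps long-running samplings from being grouped onto a single
# worker.
from joblib import Parallel, delayed


def _work(x):            # stands in for s.cache_sampling or e.do_evaluation
    return x * x


results = Parallel(n_jobs=2, batch_size=1)(delayed(_work)(i) for i in range(8))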
after\n evaluation\n max_samp_par_comb (int): maximum number of sampler parameter\n combinations to be tested\n n_jobs (int): number of parallel jobs\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n pd.DataFrame: all results or the aggregated results if all_results is\n False\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n\n datasets= [imbd.load_glass2, imbd.load_ecoli4]\n oversamplers= [sv.SMOTE_ENN, sv.NEATER, sv.Lee]\n classifiers= [KNeighborsClassifier(n_neighbors= 3),\n KNeighborsClassifier(n_neighbors= 5),\n DecisionTreeClassifier()]\n\n cache_path= '/home/<user>/smote_validation/'\n\n results= evaluate_oversamplers(datasets,\n oversamplers,\n classifiers,\n cache_path)\n \"\"\"\n\n if cache_path is None:\n raise ValueError('cache_path is not specified')\n\n results = []\n for dataset_spec in datasets:\n # loading dataset if needed and determining dataset name\n if not isinstance(dataset_spec, dict):\n dataset = dataset_spec()\n else:\n dataset = dataset_spec\n\n if 'name' in dataset:\n dataset_name = dataset['name']\n else:\n dataset_name = dataset_spec.__name__\n\n dataset['name'] = dataset_name\n\n dataset_original_target = dataset['target'].copy()\n class_labels = np.unique(dataset['target'])\n n_0 = sum(dataset['target'] == class_labels[0])\n n_1 = sum(dataset['target'] == class_labels[1])\n if n_0 < n_1:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n else:\n min_label = class_labels[1]\n maj_label = class_labels[0]\n min_ind = np.where(dataset['target'] == min_label)[0]\n maj_ind = np.where(dataset['target'] == maj_label)[0]\n np.put(dataset['target'], min_ind, 1)\n np.put(dataset['target'], maj_ind, 0)\n\n cache_path_db = os.path.join(cache_path, dataset_name)\n if not os.path.isdir(cache_path_db):\n _logger.info(\"creating cache directory\")\n os.makedirs(cache_path_db)\n\n # checking of samplings and evaluations are available\n samplings_available = False\n evaluations_available = False\n\n samplings = glob.glob(os.path.join(cache_path_db, 'sampling*.pickle'))\n if len(samplings) > 0:\n samplings_available = True\n\n evaluations = glob.glob(os.path.join(cache_path_db, 'eval*.pickle'))\n if len(evaluations) > 0:\n evaluations_available = True\n\n message = (\"dataset: %s, samplings_available: %s, \"\n \"evaluations_available: %s\")\n message = message % (dataset_name, str(samplings_available),\n str(evaluations_available))\n _logger.info(message)\n\n if (remove_cache and evaluations_available and\n not samplings_available):\n # remove_cache is enabled and evaluations are available,\n # they are being read\n message = (\"reading result from cache, sampling and evaluation is\"\n \" not executed\")\n _logger.info(message)\n res = _read_db_results(cache_path_db)\n else:\n _logger.info(\"doing the folding\")\n folding = Folding(dataset, validator, cache_path_db, random_state)\n folding.do_folding()\n\n _logger.info(\"do the samplings\")\n sampling_objs = _cache_samplings(folding,\n samplers,\n scaler,\n max_samp_par_comb,\n n_jobs,\n random_state)\n\n _logger.info(\"do the evaluations\")\n res = _cache_evaluations(\n sampling_objs, classifiers, n_jobs, random_state)\n\n dataset['target'] = dataset_original_target\n\n # removing samplings once everything is done\n if remove_cache:\n filenames = glob.glob(os.path.join(cache_path_db, 'sampling*'))\n _logger.info(\"removing 
unnecessary sampling files\")\n if len(filenames) > 0:\n for f in filenames:\n os.remove(f)\n\n _logger.info(\"concatenating the results\")\n db_res = [pd.DataFrame(r) for r in res]\n db_res = pd.concat(db_res).reset_index(drop=True)\n\n random_filename = os.path.join(cache_path_db, str(\n np.random.randint(1000000)) + '.pickle')\n pickle.dump(db_res, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n cache_path_db, 'results.pickle'))\n\n _logger.info(\"aggregating the results\")\n if all_results is False:\n db_res = db_res.groupby(by=['db_name', 'classifier', 'sampler'])\n db_res = db_res.apply(trans).reset_index().drop('level_3', axis=1)\n\n results.append(db_res)\n\n return pd.concat(results).reset_index(drop=True)\n\n\ndef model_selection(dataset,\n samplers,\n classifiers,\n cache_path,\n score='auc',\n validator=RepeatedStratifiedKFold(n_splits=5, n_repeats=3),\n remove_cache=False,\n max_samp_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques on various classifiers and a dataset\n and returns the oversampling and classifier objects giving the best\n performance\n\n Args:\n dataset (dict): a dataset is a dict with 'data', 'target' and 'name'\n keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n cache_path (str): path to a cache directory\n score (str): 'auc'/'acc'/'gacc'/'f1'/'brier'/'p_top20'\n validator (obj): validator object\n all_results (bool): True to return all results, False to return an\n aggregation\n remove_cache (bool): True to remove sampling objects after\n evaluation\n max_samp_par_comb (int): maximum number of sampler parameter\n combinations to be tested\n n_jobs (int): number of parallel jobs\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n obj, obj: the best performing sampler object and the best performing\n classifier object\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n\n datasets= imbd.load_glass2()\n oversamplers= [sv.SMOTE_ENN, sv.NEATER, sv.Lee]\n classifiers= [KNeighborsClassifier(n_neighbors= 3),\n KNeighborsClassifier(n_neighbors= 5),\n DecisionTreeClassifier()]\n\n cache_path= '/home/<user>/smote_validation/'\n\n sampler, classifier= model_selection(dataset,\n oversamplers,\n classifiers,\n cache_path,\n 'auc')\n \"\"\"\n\n if score not in ['auc', 'acc', 'gacc', 'f1', 'brier', 'p_top20']:\n raise ValueError(\"score %s not supported\" % score)\n\n results = evaluate_oversamplers(datasets=[dataset],\n samplers=samplers,\n classifiers=classifiers,\n cache_path=cache_path,\n validator=validator,\n remove_cache=remove_cache,\n max_samp_par_comb=max_samp_par_comb,\n n_jobs=n_jobs,\n random_state=random_state)\n\n # extracting the best performing classifier and oversampler parameters\n # regarding AUC\n highest_score = results[score].idxmax()\n cl_par_name = 'classifier_parameters_' + score\n samp_par_name = 'sampler_parameters_' + score\n cl, cl_par, samp, samp_par = results.loc[highest_score][['classifier',\n cl_par_name,\n 'sampler',\n samp_par_name]]\n\n # instantiating the best performing oversampler and classifier objects\n samp_obj = eval(samp)(**eval(samp_par))\n cl_obj = eval(cl)(**eval(cl_par))\n\n return samp_obj, cl_obj\n\n\ndef cross_validate(dataset,\n sampler,\n classifier,\n validator=RepeatedStratifiedKFold(n_splits=5, 
n_repeats=3),\n scaler=StandardScaler(),\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques on various classifiers and a dataset\n and returns the oversampling and classifier objects giving the best\n performance\n\n Args:\n dataset (dict): a dataset is a dict with 'data', 'target' and 'name'\n keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n validator (obj): validator object\n scaler (obj): scaler object\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n pd.DataFrame: the cross-validation scores\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.neighbors import KNeighborsClassifier\n\n dataset= imbd.load_glass2()\n sampler= sv.SMOTE_ENN\n classifier= KNeighborsClassifier(n_neighbors= 3)\n\n sampler, classifier= model_selection(dataset,\n oversampler,\n classifier)\n \"\"\"\n\n class_labels = np.unique(dataset['target'])\n binary_problem = (len(class_labels) == 2)\n\n dataset_orig_target = dataset['target'].copy()\n if binary_problem:\n _logger.info(\"The problem is binary\")\n n_0 = sum(dataset['target'] == class_labels[0])\n n_1 = sum(dataset['target'] == class_labels[1])\n if n_0 < n_1:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n else:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n\n min_ind = np.where(dataset['target'] == min_label)[0]\n maj_ind = np.where(dataset['target'] == maj_label)[0]\n np.put(dataset['target'], min_ind, 1)\n np.put(dataset['target'], maj_ind, 0)\n else:\n _logger.info(\"The problem is not binary\")\n label_indices = {}\n for c in class_labels:\n label_indices[c] = np.where(dataset['target'] == c)[0]\n mapping = {}\n for i, c in enumerate(class_labels):\n np.put(dataset['target'], label_indices[c], i)\n mapping[i] = c\n\n runtimes = []\n all_preds, all_tests = [], []\n\n for train, test in validator.split(dataset['data'], dataset['target']):\n _logger.info(\"Executing fold\")\n X_train, y_train = dataset['data'][train], dataset['target'][train]\n X_test, y_test = dataset['data'][test], dataset['target'][test]\n\n begin = time.time()\n X_samp, y_samp = sampler.sample(X_train, y_train)\n runtimes.append(time.time() - begin)\n\n X_samp_trans = scaler.fit_transform(X_samp)\n nonzero_var_idx = np.where(scaler.var_ > 1e-8)[0]\n X_test_trans = scaler.transform(X_test)\n\n all_tests.append(y_test)\n\n classifier.fit(X_samp_trans[:, nonzero_var_idx], y_samp)\n all_preds.append(classifier.predict_proba(\n X_test_trans[:, nonzero_var_idx]))\n\n if len(all_tests) > 0:\n all_preds = np.vstack(all_preds)\n all_tests = np.hstack(all_tests)\n\n dataset['target'] = dataset_orig_target\n\n _logger.info(\"Computing the results\")\n\n results = {}\n results['runtime'] = np.mean(runtimes)\n results['sampler'] = sampler.__class__.__name__\n results['classifier'] = classifier.__class__.__name__\n results['sampler_parameters'] = str(sampler.get_params())\n results['classifier_parameters'] = str(classifier.get_params())\n results['db_size'] = len(dataset['data'])\n results['db_n_attr'] = len(dataset['data'][0])\n results['db_n_classes'] = len(class_labels)\n\n if binary_problem:\n results['imbalance_ratio'] = sum(\n dataset['target'] == maj_label)/sum(dataset['target'] == min_label)\n all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_preds)\n\n results['tp'] = np.sum(np.logical_and(\n np.equal(all_tests, all_pred_labels), (all_tests == 1)))\n results['tn'] = 
np.sum(np.logical_and(\n np.equal(all_tests, all_pred_labels), (all_tests == 0)))\n results['fp'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_tests, all_pred_labels)), (all_tests == 0)))\n results['fn'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_tests, all_pred_labels)), (all_tests == 1)))\n results['p'] = results['tp'] + results['fn']\n results['n'] = results['fp'] + results['tn']\n results['acc'] = (results['tp'] + results['tn']) / \\\n (results['p'] + results['n'])\n results['sens'] = results['tp']/results['p']\n results['spec'] = results['tn']/results['n']\n results['ppv'] = results['tp']/(results['tp'] + results['fp'])\n results['npv'] = results['tn']/(results['tn'] + results['fn'])\n results['fpr'] = 1.0 - results['spec']\n results['fdr'] = 1.0 - results['ppv']\n results['fnr'] = 1.0 - results['sens']\n results['bacc'] = (results['tp']/results['p'] +\n results['tn']/results['n'])/2.0\n results['gacc'] = np.sqrt(\n results['tp']/results['p']*results['tn']/results['n'])\n results['f1'] = 2*results['tp'] / \\\n (2*results['tp'] + results['fp'] + results['fn'])\n mcc_num = (results['tp']*results['tn'] - results['fp']*results['fn'])\n tp_fp = (results['tp'] + results['fp'])\n tp_fn = (results['tp'] + results['fn'])\n tn_fp = (results['tn'] + results['fp'])\n tn_fn = (results['tn'] + results['fn'])\n mcc_denom = np.sqrt(tp_fp * tp_fn * tn_fp * tn_fn)\n results['mcc'] = mcc_num/mcc_denom\n results['l'] = (results['p'] + results['n']) * \\\n np.log(results['p'] + results['n'])\n results['ltp'] = results['tp']*np.log(results['tp']/(\n (results['tp'] + results['fp'])*(results['tp'] + results['fn'])))\n results['lfp'] = results['fp']*np.log(results['fp']/(\n (results['fp'] + results['tp'])*(results['fp'] + results['tn'])))\n results['lfn'] = results['fn']*np.log(results['fn']/(\n (results['fn'] + results['tp'])*(results['fn'] + results['tn'])))\n results['ltn'] = results['tn']*np.log(results['tn']/(\n (results['tn'] + results['fp'])*(results['tn'] + results['fn'])))\n results['lp'] = results['p'] * \\\n np.log(results['p']/(results['p'] + results['n']))\n results['ln'] = results['n'] * \\\n np.log(results['n']/(results['p'] + results['n']))\n ucc_num = (results['l'] + results['ltp'] + results['lfp'] +\n results['lfn'] + results['ltn'])\n results['uc'] = ucc_num/(results['l'] + results['lp'] + results['ln'])\n results['informedness'] = results['sens'] + results['spec'] - 1.0\n results['markedness'] = results['ppv'] + results['npv'] - 1.0\n results['log_loss'] = log_loss(all_tests, all_preds)\n results['auc'] = roc_auc_score(all_tests, all_preds[:, 1])\n test_labels, preds = zip(\n *sorted(zip(all_tests, all_preds[:, 1]), key=lambda x: -x[1]))\n test_labels = np.array(test_labels)\n th = int(0.2*len(test_labels))\n results['p_top20'] = np.sum(test_labels[:th] == 1)/th\n results['brier'] = np.mean((all_preds[:, 1] - all_tests)**2)\n else:\n all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_preds)\n\n results['acc'] = accuracy_score(all_tests, all_pred_labels)\n results['confusion_matrix'] = confusion_matrix(\n all_tests, all_pred_labels)\n sum_confusion = np.sum(results['confusion_matrix'], axis=0)\n results['gacc'] = gmean(np.diagonal(\n results['confusion_matrix'])/sum_confusion)\n results['class_label_mapping'] = mapping\n\n return pd.DataFrame({'value': list(results.values())},\n index=results.keys())\n"
] |
[
[
"scipy.spatial.Voronoi",
"numpy.linalg.matrix_rank",
"numpy.sqrt",
"sklearn.model_selection.KFold",
"numpy.all",
"sklearn.cluster.AgglomerativeClustering",
"numpy.exp",
"numpy.where",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.random.set_seed",
"numpy.unique",
"numpy.full",
"numpy.block",
"numpy.diff",
"numpy.outer",
"tensorflow.compat.v1.set_random_seed",
"numpy.zeros",
"numpy.log",
"pandas.concat",
"numpy.multiply",
"numpy.median",
"scipy.signal.find_peaks_cwt",
"numpy.delete",
"numpy.equal",
"numpy.array",
"numpy.sum",
"numpy.inner",
"numpy.isinf",
"numpy.vstack",
"sklearn.metrics.roc_auc_score",
"sklearn.cluster.KMeans",
"numpy.nan_to_num",
"sklearn.cluster.DBSCAN",
"numpy.int",
"numpy.fill_diagonal",
"numpy.var",
"tensorflow.compat.v1.keras.backend.set_session",
"numpy.linalg.cond",
"numpy.std",
"numpy.argmax",
"scipy.stats.skew",
"numpy.min",
"sklearn.manifold.Isomap",
"sklearn.manifold.LocallyLinearEmbedding",
"numpy.arccos",
"sklearn.metrics.log_loss",
"tensorflow.set_random_seed",
"sklearn.mixture.GaussianMixture",
"numpy.not_equal",
"numpy.random.RandomState",
"tensorflow.compat.v1.get_default_graph",
"numpy.linalg.solve",
"numpy.maximum",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"sklearn.linear_model.LinearRegression",
"numpy.empty",
"sklearn.neural_network.MLPClassifier",
"numpy.take",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"numpy.round",
"sklearn.manifold.TSNE",
"sklearn.base.clone",
"numpy.mean",
"numpy.argmin",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"numpy.random.randint",
"numpy.hstack",
"numpy.clip",
"sklearn.model_selection.StratifiedKFold",
"numpy.linalg.det",
"numpy.column_stack",
"numpy.repeat",
"numpy.logical_not",
"sklearn.naive_bayes.GaussianNB",
"numpy.linalg.inv",
"numpy.logspace",
"numpy.isnan",
"numpy.rint",
"numpy.logical_or",
"numpy.cov",
"sklearn.svm.SVC",
"numpy.argsort",
"sklearn.metrics.pairwise.pairwise_distances",
"sklearn.decomposition.PCA",
"numpy.diagonal",
"tensorflow.compat.v1.ConfigProto",
"sklearn.linear_model.LogisticRegression",
"numpy.linalg.norm",
"numpy.bincount",
"numpy.dot",
"numpy.max",
"sklearn.tree.DecisionTreeClassifier",
"scipy.optimize.linear_sum_assignment",
"sklearn.ensemble.RandomForestClassifier",
"numpy.linalg.eig",
"numpy.arange",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.neighbors.NearestNeighbors",
"scipy.special.erf",
"numpy.linalg.cholesky",
"numpy.logical_and",
"sklearn.model_selection.cross_val_score",
"numpy.abs",
"numpy.random.seed",
"numpy.put",
"sklearn.model_selection.RepeatedStratifiedKFold",
"numpy.prod",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score"
]
] |
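The evaluation code in the row above remaps an imbalanced binary target so the minority class becomes 1 and the majority class becomes 0 before sampling. A minimal standalone sketch of that relabelling pattern, using a hypothetical two-class target (labels 2 and 7 are invented for illustration):

import numpy as np

# Hypothetical two-class target: label 2 is the majority, label 7 the minority.
target = np.array([2, 2, 2, 7, 2, 7, 2])

class_labels = np.unique(target)          # array([2, 7])
n_0 = np.sum(target == class_labels[0])   # 5 samples of label 2
n_1 = np.sum(target == class_labels[1])   # 2 samples of label 7

# The rarer class becomes the minority label, mirroring evaluate_oversamplers above.
if n_0 < n_1:
    min_label, maj_label = class_labels[0], class_labels[1]
else:
    min_label, maj_label = class_labels[1], class_labels[0]

# Record index positions first, then rewrite in place: minority -> 1, majority -> 0.
min_ind = np.where(target == min_label)[0]
maj_ind = np.where(target == maj_label)[0]
np.put(target, min_ind, 1)
np.put(target, maj_ind, 0)

print(target)  # [0 0 0 1 0 1 0]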
JonathanCamargo/Eris
|
[
"34c389f0808c8b47933605ed19d98e62280e56dd"
] |
[
"python/Examples/BandwidthTest.py"
] |
[
"import rospy\nimport threading\nimport numpy as np\nfrom std_msgs.msg import Header,String\nfrom custom_msgs.msg import Float32\n\nimport copy\n\n#ROS TEST to exceed WLAN bandwidth\nfrom threading import Thread,Lock\nfrom time import sleep\n\ndataMutex=Lock()\nrospy.init_node('talker', anonymous=True)\n\nglobal publishers\npublishers = []\n\nRATE=50 #HZ\nN=30 #Topics\nM=5 #Samples\ndt=1.0/(RATE*M)\nTOTALTIME=10\n\nfor i in range(0,N):\n topicName='float'+str(i)\n publishers.append(rospy.Publisher(topicName, Float32, queue_size=10))\n\ndef publishDataNoThread(time,dt,data):\n global publishers\n for i,p in enumerate(publishers):\n for j,d in enumerate(data[i,:]):\n t=time-rospy.Duration(dt*(j-1))\n header=Header(stamp=t)\n msg=Float32(header,d)\n p.publish(msg)\n\ndef publishData(time,data):\n global publishers\n for i,p in enumerate(publishers):\n for j,d in enumerate(data[i,:]):\n t=time-rospy.Duration(dt*(j-1))\n header=Header(stamp=t)\n msg=Float32(header,d)\n publishers[i].publish(msg)\n\ndef wasteTime(time,data):\n sleep(1)\n\nrate = rospy.Rate(RATE)\n\nprint('Inicio')\n\ncounter=0\nt0=rospy.Time.now()\nrate.sleep()\n\nfreeTime=[]\nfor i in range(0,int(TOTALTIME*RATE)):\n #Every loop iteration we have m samples of every topic\n #And launch independent threads to publish every topic\n\n #dataMutex.acquire(1)\n #data=np.random.rand(M,N)\n data=np.repeat([np.flip(np.arange(1,M+1),0)],[N],axis=0)\n data=data+counter\n\n counter=counter+M\n if counter+M>=1000:\n counter=0\n #dataMutex.release()\n time=rospy.Time.now()-t0\n time=rospy.Time()+time\n\n #One by one\n #for index,p in enumerate(publishers):\n # d=data[index,:]\n # t=Thread(target=publishData,args=(time,dt,p,d,))\n # t.start()\n #All in a thread\n t=Thread(target=publishData,args=(time,data))\n #t=Thread(target=wasteTime,args=(time,data))\n t.start()\n #No threads\n #publishDataNoThread(time,dt,publishers,data)\n r=rate.remaining()\n freeTime.append(r.secs+r.nsecs/1E9)\n rate.sleep()\n\ntend=rospy.Time.now()\ndur=tend-t0\nprint('Runtime: '+str(dur.secs+dur.nsecs/1E9))\nprint('END')\nprint('FreeTime(ms)')\nprint('mean:'+str(np.mean(freeTime)*1000))\nprint('std:'+str(np.std(freeTime)*1000))\nprint('min:'+str(np.min(freeTime)*1000))\n"
] |
[
[
"numpy.arange",
"numpy.std",
"numpy.mean",
"numpy.min"
]
] |
cow8/PupilTracking
|
[
"02977de83c9f1481b700a3bb0ac37f0f814b1faa"
] |
[
"train_evaluator/train.py"
] |
[
"# import alexnet\n# import resnet_v1\nimport platform\n\nimport tensorflow as tf\n\nfrom MakeDataset import TFRecord_Slim_Reader as TFRS_Reader\nfrom Train.VGG import vgg\n\nslim=tf.contrib.slim\nnetname=\"VGG16\"\nrate=1\nnet=vgg.vgg_16\nTFRECORDFILE=\"./dataset.tfrecord\"\nIS_PRETRAIN=True\nPRETRAIN_FILEPATH=\"./train_evaluator/pretrain_vgg/vgg_16.ckpt\"\ndef main(_dataset,model_path,log_dir):\n image,label=TFRS_Reader.PupilDataset(_dataset)\n image.set_shape([120, 120, 3])\n images, labels = tf.train.shuffle_batch([image, label], batch_size=32,capacity=50000,\n min_after_dequeue=20000)\n images = tf.image.resize_bicubic(images, [224, 224])\n\n predictions, end_points = net(images, num_classes=3)\n\n # Specify the loss function:\n slim.losses.softmax_cross_entropy(predictions, labels)\n\n total_loss = slim.losses.get_total_loss()\n slim.summary.scalar('losses/total_loss', total_loss)\n\n # Specify the optimization scheme:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=.001)\n\n # create_train_op that ensures that when we evaluate it to get the loss,\n # the update_ops are done and the gradient updates are computed.\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n # Specify where the Model, trained on ImageNet, was saved.\n # model_path =\n\n # Specify where the new model will live:\n # log_dir =\n\n # Restore only the convolutional layers:\n variables_to_restore = slim.get_variables_to_restore(exclude=[ 'vgg_16/fc6','vgg_16/fc7','vgg_16/fc8'])\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(model_path, variables_to_restore)\n\n values, indices=tf.nn.top_k(predictions, 1)\n p=tf.one_hot(indices, 3, 1, 0)\n p=tf.reshape(p,(32,3))\n acc=slim.metrics.accuracy(p, labels)\n # op1=slim.summary.scalar('summaries/acc/acc', acc)\n # op2=slim.summary.scalar('summaries/var/total_loss', total_loss)\n # op3=slim.summary.scalar('summaries/var/predictions', indices[0][0])\n # op4=slim.summary.scalar('summaries/var/labels', tf.nn.top_k(labels, 1)[1][0][0])\n op=slim.summary.merge_all(key='summaries')\n op=tf.Print(op,[acc,total_loss,predictions,labels])\n # Start training.\n slim.learning.train(train_op, log_dir, init_fn=init_fn,save_summaries_secs=20,summary_op=op)\nif __name__ == '__main__':\n if platform.system() == 'Windows':\n _dataset, _model, _log=[\"./\"+TFRECORDFILE,PRETRAIN_FILEPATH,'./train_evaluator/log']\n else:\n _dataset, _model, _log = [TFRECORDFILE, PRETRAIN_FILEPATH, \"./train_evaluator/log\"]\n main(_dataset, _model, _log)"
] |
[
[
"tensorflow.Print",
"tensorflow.contrib.framework.assign_from_checkpoint_fn",
"tensorflow.reshape",
"tensorflow.image.resize_bicubic",
"tensorflow.nn.top_k",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.one_hot",
"tensorflow.train.shuffle_batch"
]
] |
lytai1/difusion_imaging
|
[
"4884e63d7c36cb05564a736a2f31f2beaacde6d4"
] |
[
"diffusion_imaging/preprocessing/preprocessing.py"
] |
[
"from dipy.segment.mask import median_otsu\nfrom abc import (\n ABC,\n abstractmethod\n)\nfrom dependency_injector import providers, containers\nimport numpy as np\nfrom dipy.segment.mask import median_otsu\n\nclass Preprocess(ABC):\n \"\"\"\n Base class for the preprocessing step\n \"\"\"\n \n @abstractmethod\n def process(self, data):\n pass\n\n\nclass Mask(Preprocess):\n \"\"\"\n Wrapper for the Mask process\n \"\"\"\n \n def _mask(self, image):\n maskdata, mask = median_otsu(image, vol_idx=[0, 1], median_radius=4, numpass=2,\n autocrop=False, dilate=1)\n\n axial_slice = 40\n mask_roi = np.zeros(image.shape[:-1], dtype=bool)\n mask_roi[:, :, axial_slice] = mask[:, :, axial_slice]\n \n return mask_roi\n \n def process(self, mri_data):\n \n return self._mask(mri_data)\n \n \nclass PreprocessProvider(providers.Factory):\n \n provided_type = Preprocess\n \n \nclass PreprocessContainer(containers.DeclarativeContainer):\n \n mask = PreprocessProvider(Mask)\n "
] |
[
[
"numpy.zeros"
]
] |
evijit/hci_updates
|
[
"7e6f564fe3f819f83baaffa7fd6fcd7ecbb58bfa"
] |
[
"code/ner_stuff.py"
] |
[
"from pathlib import Path\nimport spacy\nfrom nltk.tokenize import TweetTokenizer\nimport sys\nimport csv\nimport json\nimport re\nfrom pandas.io.json import json_normalize\n\ntknzr = TweetTokenizer()\ntknzr = TweetTokenizer(reduce_len=True)\n\nnlp2 = spacy.load(Path('ner_model_50'))\n\n\nfull_data = []\ncounter = 0\nwith open(sys.argv[1], \"r\") as json_in:\n line = json_in.readline()\n while line:\n try:\n if counter % 1000 == 0:\n sys.stderr.write(\"\\t\\tcounter --- %d\\n\" % counter)\n stuff = line.strip()\n \n obj = json.loads(stuff)\n text = obj['text']\n\n crap = \" \".join([w for w in tknzr.tokenize(text)])\n doc = nlp2(crap)\n\n\n tech_words = set()\n react_words = set()\n\n\n tech_working = \"\"\n react_working = \"\"\n for x in doc.ents:\n if x.label_ == \"B-TECH\":\n if tech_working:\n if tech_working in crap:\n tech_words.add(tech_working)\n tech_working = x.text\n\n elif x.label_ == \"B-REACT\":\n if react_working:\n if react_working in crap:\n react_words.add(react_words)\n react_working = x.text\n\n elif x.label_ == \"I-REACT\":\n tmp = react_working\n tmp += \" \" + x.text\n if tmp not in crap:\n if react_working in crap:\n react_words.add(react_working)\n react_working = x.text\n else:\n react_working = tmp\n \n elif x.label_ == \"I-TECH\":\n tmp = tech_working\n tmp += \" \" + x.text\n if tmp not in crap:\n if tech_working in crap:\n tech_words.add(tech_working)\n tech_working = x.text\n else:\n tech_working = tmp\n\n if tech_working:\n if tech_working in crap:\n tech_words.add(tech_working)\n if react_working:\n if react_working in crap:\n react_words.add(react_working)\n\n \n #print(\"Sentence: \")\n #print(crap)\n\n #for ent in doc.ents:\n # print(ent.label_, ent.text)\n #\n #print(\"tech\")\n #print(tech_words)\n #print()\n #print(\"react\")\n #print(react_words)\n #print(\"\\n\\n\")\n reported_set_tech = set()\n reported_set_react = set()\n \n\n for element in tech_words:\n stuff = re.split(\" \\. | \\.|\\. \", element)\n for shiz in stuff:\n if shiz and shiz != \".\":\n reported_set_tech.add(shiz)\n for element in react_words:\n stuff = re.split(\" \\. | \\.|\\. \", element)\n for shiz in stuff:\n if shiz and shiz != \".\":\n reported_set_react.add(shiz)\n\n\n obj[\"tech\"] = reported_set_tech\n obj[\"react\"] = reported_set_react\n\n \n full_data.append(obj)\n \n\n\n counter += 1\n \n line = json_in.readline()\n except TypeError as err:\n line = json_in.readline()\n counter += 1\n continue\ndf = json_normalize(full_data)\ndf.to_csv(\"csv_with_labels.csv\")\n"
] |
[
[
"pandas.io.json.json_normalize"
]
] |
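The NER script in the row above flattens the accumulated per-tweet dicts with json_normalize and writes them to CSV. A minimal sketch of that final step, with hypothetical records standing in for the real tweet objects; note that pandas.io.json.json_normalize is the legacy import path and recent pandas exposes the same function as pd.json_normalize:

import pandas as pd

# Hypothetical flattened records standing in for the per-tweet dicts built above.
records = [{'text': 'hello', 'n_tech': 2}, {'text': 'world', 'n_tech': 0}]

df = pd.json_normalize(records)
df.to_csv('csv_with_labels.csv')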
Mucephie/ccdprocx
|
[
"da8e64f685045db22b40df960b03257b6a6c854b"
] |
[
"ccdprocx/tests/test_rebin.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nimport pytest\n\nfrom astropy.nddata import StdDevUncertainty\n\nfrom astropy.tests.helper import catch_warnings\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\n\nfrom ccdprocx.core import rebin\nfrom ccdprocx.tests.pytest_fixtures import ccd_data as ccd_data_func\n\n\n# test rebinning ndarray\ndef test_rebin_ndarray():\n with pytest.raises(TypeError), catch_warnings(AstropyDeprecationWarning):\n rebin(1, (5, 5))\n\n\n# test rebinning dimensions\ndef test_rebin_dimensions():\n ccd_data = ccd_data_func(data_size=10)\n with pytest.raises(ValueError), catch_warnings(AstropyDeprecationWarning):\n rebin(ccd_data.data, (5,))\n\n\n# test rebinning dimensions\ndef test_rebin_ccddata_dimensions():\n ccd_data = ccd_data_func(data_size=10)\n with pytest.raises(ValueError), catch_warnings(AstropyDeprecationWarning):\n rebin(ccd_data, (5,))\n\n\n# test rebinning works\ndef test_rebin_larger():\n ccd_data = ccd_data_func(data_size=10)\n a = ccd_data.data\n with catch_warnings(AstropyDeprecationWarning) as w:\n b = rebin(a, (20, 20))\n assert len(w) >= 1\n\n assert b.shape == (20, 20)\n np.testing.assert_almost_equal(b.sum(), 4 * a.sum())\n\n\n# test rebinning is invariant\ndef test_rebin_smaller():\n ccd_data = ccd_data_func(data_size=10)\n a = ccd_data.data\n with catch_warnings(AstropyDeprecationWarning) as w:\n b = rebin(a, (20, 20))\n c = rebin(b, (10, 10))\n assert len(w) >= 1\n\n assert c.shape == (10, 10)\n assert (c - a).sum() == 0\n\n\n# test rebinning with ccddata object\[email protected]('mask_data, uncertainty', [\n (False, False),\n (True, True)])\ndef test_rebin_ccddata(mask_data, uncertainty):\n ccd_data = ccd_data_func(data_size=10)\n if mask_data:\n ccd_data.mask = np.zeros_like(ccd_data)\n if uncertainty:\n err = np.random.normal(size=ccd_data.shape)\n ccd_data.uncertainty = StdDevUncertainty(err)\n\n with catch_warnings(AstropyDeprecationWarning) as w:\n b = rebin(ccd_data, (20, 20))\n assert len(w) >= 1\n\n assert b.shape == (20, 20)\n if mask_data:\n assert b.mask.shape == (20, 20)\n if uncertainty:\n assert b.uncertainty.array.shape == (20, 20)\n\n\ndef test_rebin_does_not_change_input():\n ccd_data = ccd_data_func()\n original = ccd_data.copy()\n with catch_warnings(AstropyDeprecationWarning) as w:\n _ = rebin(ccd_data, (20, 20))\n assert len(w) >= 1\n np.testing.assert_array_equal(original.data, ccd_data.data)\n assert original.unit == ccd_data.unit\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.random.normal",
"numpy.zeros_like"
]
] |
Extracheesy/KerasPlaypen
|
[
"cdd6eb9c82d57e36c8a5e0509b50a00f163fc734"
] |
[
"main.py"
] |
[
"# This is a sample Python script.\n\n# Press Maj+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\n# from numpy import loadtxt\nimport numpy as np\nimport tensorflow as tf\n\nfrom keras.optimizers import SGD\nfrom matplotlib import pyplot\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nfrom dataloader import *\nfrom train import *\nfrom predictor import *\n\ndef print_hi(name):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\n\n\ndef first_test():\n\n # load the dataset\n dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')\n # split into input (X) and output (y) variables\n X = dataset[:, 0:8]\n y = dataset[:, 8]\n\n # define the keras model\n model = Sequential()\n model.add(Dense(12, input_dim=8, activation='relu'))\n model.add(Dense(8, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n # compile the keras model\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # fit the keras model on the dataset\n model.fit(X, y, epochs=150, batch_size=10)\n\n # evaluate the keras model\n _, accuracy = model.evaluate(X, y)\n print('Accuracy: %.2f' % (accuracy * 100))\n\n # make probability predictions with the model\n predictions = model.predict(X)\n # round predictions\n rounded = [round(x[0]) for x in predictions]\n\n # make class predictions with the model\n predictions = model.predict_classes(X)\n\n # summarize the first 5 cases\n total_runs = 700\n count_ok = 0\n for i in range(total_runs):\n if predictions[i] == y[i] :\n count_ok = count_ok + 1\n print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i], y[i]))\n\n print(\"% success: \", count_ok, \" out of: \", total_runs, \" %: \", round(100 * count_ok / total_runs, 2))\n\n\n# Press the green button in the gutter to run the script.\n#if __name__ == '__main__':\n\n #first_test()\n\n # main_train()\n\n #test_yfinances()\n\n\n # main_predictor()\n\n #print_hi('PyCharm')\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n"
] |
[
[
"numpy.loadtxt"
]
] |
tschaume/fireworks
|
[
"0701e22f979472e5eb1b40db1d21a798462858dc"
] |
[
"fireworks/utilities/fw_utilities.py"
] |
[
"import contextlib\nimport datetime\nimport errno\nimport logging\nimport multiprocessing\nimport os\nimport socket\nimport string\nimport sys\nimport traceback\nfrom logging import Formatter, Logger\nfrom multiprocessing.managers import BaseManager\nfrom typing import Tuple\n\nfrom fireworks.fw_config import DS_PASSWORD, FW_BLOCK_FORMAT, FW_LOGGING_FORMAT, FWData\n\n__author__ = \"Anubhav Jain, Xiaohui Qu\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Anubhav Jain\"\n__email__ = \"[email protected]\"\n__date__ = \"Dec 12, 2012\"\n\nPREVIOUS_STREAM_LOGGERS = [] # contains the name of loggers that have already been initialized\nPREVIOUS_FILE_LOGGERS = [] # contains the name of file loggers that have already been initialized\nDEFAULT_FORMATTER = Formatter(FW_LOGGING_FORMAT)\n\n\ndef get_fw_logger(\n name: str,\n l_dir: None = None,\n file_levels: Tuple[str, str] = (\"DEBUG\", \"ERROR\"),\n stream_level: str = \"DEBUG\",\n formatter: Formatter = DEFAULT_FORMATTER,\n clear_logs: bool = False,\n) -> Logger:\n \"\"\"\n Convenience method to return a logger.\n\n Args:\n name: name of the logger that sets the groups, e.g. 'group1.set2'\n l_dir: the directory to put the log file\n file_levels: iterable describing level(s) to log to file(s). default: ('DEBUG', 'ERROR')\n stream_level: level to log to standard output. default: 'DEBUG'\n formatter: logging format. default: FW_LOGGING_FORMATTER\n clear_logs: whether to clear the logger with the same name\n \"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG) # anything debug and above passes through to the handler level\n\n stream_level = stream_level if stream_level else \"CRITICAL\"\n # add handlers for the file_levels\n if l_dir:\n for lvl in file_levels:\n f_name = os.path.join(l_dir, name.replace(\".\", \"_\") + \"-\" + lvl.lower() + \".log\")\n mode = \"w\" if clear_logs else \"a\"\n fh = logging.FileHandler(f_name, mode=mode)\n fh.setLevel(getattr(logging, lvl))\n fh.setFormatter(formatter)\n if f_name not in PREVIOUS_FILE_LOGGERS:\n logger.addHandler(fh)\n PREVIOUS_FILE_LOGGERS.append(f_name)\n\n if (name, stream_level) not in PREVIOUS_STREAM_LOGGERS:\n # add stream handler\n sh = logging.StreamHandler(stream=sys.stdout)\n sh.setLevel(getattr(logging, stream_level))\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n PREVIOUS_STREAM_LOGGERS.append((name, stream_level))\n\n return logger\n\n\ndef log_multi(m_logger, msg, log_lvl=\"info\"):\n \"\"\"\n Args:\n m_logger (logger): The logger object\n msg (str): a String to log\n log_lvl (str): The level to log at\n \"\"\"\n _log_fnc = getattr(m_logger, log_lvl.lower())\n if FWData().MULTIPROCESSING:\n _log_fnc(f\"{msg} : ({multiprocessing.current_process().name})\")\n else:\n _log_fnc(msg)\n\n\ndef log_fancy(m_logger, msgs, log_lvl=\"info\", add_traceback=False):\n \"\"\"\n A wrapper around the logger messages useful for multi-line logs.\n Helps to group log messages by adding a fancy border around it,\n which enhances readability of log lines meant to be read\n as a unit.\n\n Args:\n m_logger (logger): The logger object\n log_lvl (str): The level to log at\n msgs ([str]): a String or iterable of Strings\n add_traceback (bool): add traceback text, useful when logging exceptions (default False)\n \"\"\"\n if isinstance(msgs, str):\n msgs = [msgs]\n\n _log_fnc = getattr(m_logger, log_lvl.lower())\n\n _log_fnc(\"----|vvv|----\")\n _log_fnc(\"\\n\".join(msgs))\n if add_traceback:\n _log_fnc(traceback.format_exc())\n 
_log_fnc(\"----|^^^|----\")\n\n\ndef log_exception(m_logger, msgs):\n \"\"\"\n A shortcut wrapper around log_fancy for exceptions\n\n Args:\n m_logger (logger): The logger object\n msgs ([str]): String or iterable of Strings, will be joined by newlines\n \"\"\"\n return log_fancy(m_logger, msgs, \"error\", add_traceback=True)\n\n\ndef create_datestamp_dir(root_dir, l_logger, prefix=\"block_\"):\n \"\"\"\n Internal method to create a new block or launcher directory.\n The dir name is based on the time and the FW_BLOCK_FORMAT\n\n Args:\n root_dir: directory to create the new dir in\n l_logger: the logger to use\n prefix: the prefix for the new dir, default=\"block_\"\n \"\"\"\n\n def get_path():\n time_now = datetime.datetime.utcnow().strftime(FW_BLOCK_FORMAT)\n block_path = prefix + time_now\n return os.path.join(root_dir, block_path)\n\n ctn = 0\n max_try = 10\n full_path = None\n while full_path is None:\n full_path = get_path()\n if os.path.exists(full_path):\n full_path = None\n import random\n import time\n\n time.sleep(random.random() / 3 + 0.1)\n continue\n else:\n try:\n os.mkdir(full_path)\n break\n except OSError as e:\n if ctn > max_try or e.errno != errno.EEXIST:\n raise e\n ctn += 1\n full_path = None\n continue\n\n l_logger.info(f\"Created new dir {full_path}\")\n return full_path\n\n\n_g_ip, _g_host = None, None\n\n\ndef get_my_ip():\n global _g_ip\n if _g_ip is None:\n try:\n _g_ip = socket.gethostbyname(socket.gethostname())\n except Exception:\n _g_ip = \"127.0.0.1\"\n return _g_ip\n\n\ndef get_my_host():\n global _g_host\n if _g_host is None:\n _g_host = socket.gethostname()\n return _g_host\n\n\ndef get_slug(m_str):\n valid_chars = f\"-_.() {string.ascii_letters}{string.digits}\"\n m_str = \"\".join(c for c in m_str if c in valid_chars)\n return m_str.replace(\" \", \"_\")\n\n\nclass DataServer(BaseManager):\n \"\"\"\n Provide a server that can host shared objects between multiprocessing\n Processes (that normally can't share data). For example, a common LaunchPad is\n shared between processes and pinging launches is coordinated to limit DB hits.\n \"\"\"\n\n @classmethod\n def setup(cls, launchpad):\n \"\"\"\n Args:\n launchpad (LaunchPad)\n\n Returns:\n DataServer\n \"\"\"\n DataServer.register(\"LaunchPad\", callable=lambda: launchpad)\n m = DataServer(address=(\"127.0.0.1\", 0), authkey=DS_PASSWORD) # random port\n m.start()\n return m\n\n\nclass NestedClassGetter:\n \"\"\"\n Used to help pickle inner classes, e.g. see Workflow.Links\n When called with the containing class as the first argument,\n and the name of the nested class as the second argument,\n returns an instance of the nested class.\n \"\"\"\n\n def __call__(self, containing_class, class_name):\n nested_class = getattr(containing_class, class_name)\n # return an instance of a nested_class. Some more intelligence could be\n # applied for class construction if necessary.\n # To support for Pickling of Workflow.Links\n return nested_class()\n\n\ndef explicit_serialize(o):\n module_name = o.__module__\n if module_name == \"__main__\":\n import __main__\n\n module_name = os.path.splitext(os.path.basename(__main__.__file__))[0]\n o._fw_name = f\"{{{{{module_name}.{o.__name__}}}}}\"\n return o\n\n\ndef plot_wf(\n wf,\n depth_factor=1.0,\n breadth_factor=2.0,\n labels_on=True,\n numerical_label=False,\n text_loc_factor=1.0,\n save_as=None,\n style=\"rD--\",\n markersize=10,\n markerfacecolor=\"blue\",\n fontsize=12,\n):\n \"\"\"\n Generate a visual representation of the workflow. 
Useful for checking whether the firework\n connections are in order before launching the workflow.\n\n Args:\n wf (Workflow): workflow object.\n depth_factor (float): adjust this to stretch the plot in y direction.\n breadth_factor (float): adjust this to stretch the plot in x direction.\n labels_on (bool): whether to label the nodes or not. The default is to label the nodes\n using the firework names.\n numerical_label (bool): set this to label the nodes using the firework ids.\n text_loc_factor (float): adjust the label location.\n save_as (str): save the figure to the given name.\n style (str): marker style.\n markersize (int): marker size.\n markerfacecolor (str): marker face color.\n fontsize (int): font size for the node label.\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print(\"Install matplotlib. Exiting.\")\n sys.exit()\n\n keys = sorted(wf.links.keys(), reverse=True)\n n_root_nodes = len(wf.root_fw_ids)\n\n # set (x,y) coordinates for each node in the workflow links\n points_map = {}\n\n # root nodes\n for i, k in enumerate(wf.root_fw_ids):\n points_map.update({k: ((-0.5 * n_root_nodes + i) * breadth_factor, (keys[0] + 1) * depth_factor)})\n\n # the rest\n for k in keys:\n for i, j in enumerate(wf.links[k]):\n if not points_map.get(j, None):\n points_map[j] = ((i - len(wf.links[k]) / 2.0) * breadth_factor, k * depth_factor)\n\n # connect the dots\n for k in keys:\n for i in wf.links[k]:\n plt.plot(\n [points_map[k][0], points_map[i][0]],\n [points_map[k][1], points_map[i][1]],\n style,\n markersize=markersize,\n markerfacecolor=markerfacecolor,\n )\n if labels_on:\n label1 = wf.id_fw[k].name\n label2 = wf.id_fw[i].name\n if numerical_label:\n label1 = str(k)\n label2 = str(i)\n plt.text(\n points_map[k][0] * text_loc_factor, points_map[k][1] * text_loc_factor, label1, fontsize=fontsize\n )\n plt.text(\n points_map[i][0] * text_loc_factor, points_map[i][1] * text_loc_factor, label2, fontsize=fontsize\n )\n\n plt.axis(\"scaled\")\n plt.axis(\"off\")\n\n if save_as:\n plt.savefig(save_as)\n\n\[email protected]\ndef redirect_local():\n \"\"\"\n temporarily redirect stdout or stderr to fws.error and fws.out\n\n \"\"\"\n\n try:\n old_err = os.dup(sys.stderr.fileno())\n old_out = os.dup(sys.stdout.fileno())\n\n new_err = open(\"FW_job.error\", \"w\")\n new_out = open(\"FW_job.out\", \"w\")\n\n os.dup2(new_err.fileno(), sys.stderr.fileno())\n os.dup2(new_out.fileno(), sys.stdout.fileno())\n yield\n\n finally:\n\n os.dup2(old_err, sys.stderr.fileno())\n os.dup2(old_out, sys.stdout.fileno())\n\n new_err.close()\n new_out.close()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis"
]
] |
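A minimal usage sketch for the logging helpers defined in fw_utilities.py above, assuming the fireworks package is installed; the logger name and the message text are hypothetical, and the call signatures follow the definitions shown in the row:

from fireworks.utilities.fw_utilities import get_fw_logger, log_fancy

# Stream-only logger (no l_dir, so no file handlers are attached).
logger = get_fw_logger('group1.set2', stream_level='INFO')

# log_fancy wraps multi-line messages in a ----|vvv|---- / ----|^^^|---- border.
log_fancy(logger, ['first line of a grouped message', 'second line'])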
IntroCursos/PyQTEjemplos
|
[
"e465f825e09a01b98f5d1d40431658db67342da7"
] |
[
"VentanaLecturaDeExcel/OpenSavePandasGUI.py"
] |
[
"\n#pyuic5 -x mainwindow.ui -o output.py\n\n# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'mainwindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.9.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PandasData import Mi_tabla\nimport pandas as pd\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(534, 379)\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)\n self.gridLayout.setContentsMargins(11, 11, 11, 11)\n self.gridLayout.setSpacing(6)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.label = QtWidgets.QLabel(self.centralWidget)\n self.label.setObjectName(\"label\")\n self.gridLayout.addWidget(self.label, 0, 0, 1, 1)\n self.openTable = QtWidgets.QPushButton(self.centralWidget)\n\n self.openTable.setObjectName(\"openTable\")\n self.gridLayout.addWidget(self.openTable, 1, 0, 1, 1)\n self.openTable.clicked.connect(self.CargaTabla)\n\n self.tableView = QtWidgets.QTableView(self.centralWidget)\n self.tableView.setObjectName(\"tableView\")\n self.gridLayout.addWidget(self.tableView, 2, 0, 1, 3)\n\n self.clearTable = QtWidgets.QPushButton(self.centralWidget)\n self.clearTable.setObjectName(\"clearTable\")\n self.gridLayout.addWidget(self.clearTable, 1, 1, 1, 1)\n self.clearTable.clicked.connect(self.LimpiarTabla)\n\n self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit.setObjectName(\"lineEdit\")\n self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 2)\n\n self.saveTable = QtWidgets.QPushButton(self.centralWidget)\n self.saveTable.setObjectName(\"saveTable\")\n self.gridLayout.addWidget(self.saveTable, 1, 2, 1, 1)\n self.saveTable.clicked.connect(self.GuardarTabla)\n\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 534, 22))\n self.menuBar.setObjectName(\"menuBar\")\n self.menuFile = QtWidgets.QMenu(self.menuBar)\n self.menuFile.setObjectName(\"menuFile\")\n MainWindow.setMenuBar(self.menuBar)\n self.mainToolBar = QtWidgets.QToolBar(MainWindow)\n self.mainToolBar.setObjectName(\"mainToolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)\n self.statusBar = QtWidgets.QStatusBar(MainWindow)\n self.statusBar.setObjectName(\"statusBar\")\n MainWindow.setStatusBar(self.statusBar)\n self.actionOpen = QtWidgets.QAction(MainWindow)\n self.actionOpen.setObjectName(\"actionOpen\")\n self.actionSave = QtWidgets.QAction(MainWindow)\n self.actionSave.setObjectName(\"actionSave\")\n self.actionClear = QtWidgets.QAction(MainWindow)\n self.actionClear.setObjectName(\"actionClear\")\n self.menuFile.addAction(self.actionOpen)\n self.menuFile.addAction(self.actionSave)\n self.menuFile.addAction(self.actionClear)\n self.menuBar.addAction(self.menuFile.menuAction())\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"TextLabel\"))\n self.openTable.setText(_translate(\"MainWindow\", \"OpenTable\"))\n self.clearTable.setText(_translate(\"MainWindow\", \"Clear Table\"))\n self.saveTable.setText(_translate(\"MainWindow\", \"Save Table\"))\n 
self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.actionOpen.setText(_translate(\"MainWindow\", \"Open\"))\n self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))\n self.actionClear.setText(_translate(\"MainWindow\", \"Clear\"))\n\n def CargaTabla(self):\n #table = QtWidgets.QTableView()\n df =Mi_tabla()\n #df = pd.read_excel(\"./tabla.xlsx\",sheetname=\"hoja1\")\n mymodel = PandasModel(df)\n self.tableView.setModel(mymodel)\n\n def LimpiarTabla(self):\n\n self.tableView.setModel(None)\n\n def GuardarTabla(self):\n mymodel = self.tableView.model()\n print (type(mymodel) )\n print (type(mymodel._data) )\n print (len(mymodel._data.values))\n print (mymodel._data.values[0][0])\n df = mymodel._data\n #df.to_excel('foo.xlsx', sheet_name='Sheet1')\n print (df.index)\n # Specify a writer\n writer = pd.ExcelWriter('example2.xlsx', engine='xlsxwriter')\n # Write your DataFrame to a file\n #df.to_excel(writer, 'Sheet1')\n df.to_excel(writer, sheet_name='Sheet1')\n # Save the result\n writer.save()\n print(\"Segun eso se guardo\")\n\n\nclass PandasModel(QtCore.QAbstractTableModel):\n \"\"\"\n Class to populate a table view with a pandas dataframe\n \"\"\"\n\n def __init__(self, data, parent=None):\n QtCore.QAbstractTableModel.__init__(self, parent)\n self._data = data\n\n def rowCount(self, parent=None):\n return len(self._data.values)\n\n def columnCount(self, parent=None):\n return self._data.columns.size\n\n def data(self, index, role=QtCore.Qt.DisplayRole):\n if index.isValid():\n if role == QtCore.Qt.DisplayRole:\n if(index.column() != 0):\n #return str('%.2f'%self._data.values[index.row()][index.column()])\n return str(self._data.values[index.row()][index.column()])\n else:\n return str(self._data.values[index.row()][index.column()])\n return None\n\n def headerData(self, section, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return self._data.columns[section]\n elif orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:\n return str(self._data.index[section])\n return None\n \"\"\"\n def flags(self, index):\n flags = super(self.__class__,self).flags(index)\n flags |= QtCore.Qt.ItemIsSelectable\n flags |= QtCore.Qt.ItemIsEnabled\n return flags\n \"\"\"\n\n def setData(self, index, value, role):\n self._data.iloc[[index.row()],[index.column()] ] = value\n return True\n\n def flags(self, index):\n return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n"
] |
[
[
"pandas.ExcelWriter"
]
] |
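The GuardarTabla method in the row above writes the table model's DataFrame with pandas.ExcelWriter and the xlsxwriter engine. A minimal sketch of that save step, assuming xlsxwriter is installed; on recent pandas the context manager saves the workbook on exit, so the explicit writer.save() used above is not needed:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.5, 4.5]})

# The workbook is written when the context manager exits.
with pd.ExcelWriter('example2.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, sheet_name='Sheet1')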
rohith-bs/dgraphpandas
|
[
"29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316"
] |
[
"tests/strategies/test_horizontal.py"
] |
[
"import unittest\nfrom unittest.mock import patch, Mock\n\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nfrom parameterized import parameterized\n\nfrom dgraphpandas.strategies.horizontal import horizontal_transform\n\n\nclass HorizontalTests(unittest.TestCase):\n\n @parameterized.expand([\n (None, {'config': {}}, 'config_key'),\n (pd.DataFrame(), None, 'config_key'),\n (pd.DataFrame(), '', 'config_key'),\n (pd.DataFrame(), {'config': {}}, None),\n (pd.DataFrame(), {'config': {}}, ''),\n ])\n def test_horizontal_transform_null_parameters(self, frame, config, config_file_key):\n '''\n Ensures when parameters are null, then an\n error is raised\n '''\n with self.assertRaises(ValueError):\n horizontal_transform(frame, config, config_file_key)\n\n def test_horizontal_config_key_does_not_exist(self):\n '''\n Ensures when the config key does not exist\n within the config then an error is raised\n '''\n frame = pd.DataFrame()\n config_key = 'my_key'\n config = {\n 'files': {\n 'some_other_key': {}\n }\n }\n\n with self.assertRaises(KeyError):\n horizontal_transform(frame, config, config_key)\n\n @parameterized.expand([\n ('',),\n (None,),\n ])\n def test_horizontal_subject_fields_not_provided(self, subject_fields):\n '''\n Ensures when subject fields is not provided\n then an error is raised\n '''\n frame = pd.DataFrame()\n config_key = 'my_key'\n config = {\n 'files': {\n 'my_key': {\n 'subject_fields': subject_fields\n }\n }\n }\n\n with self.assertRaises(ValueError):\n horizontal_transform(frame, config, config_key)\n\n def test_horizontal_could_not_convert_type(self):\n '''\n Ensures when a type could not be applied to a column,\n then an error is raised\n '''\n frame = pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'age': [23, 'not number', 56]\n })\n config = {\n 'files': {\n 'customer': {\n 'subject_fields': ['customer_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'age': 'int32'\n }\n }\n }\n }\n config_file_key = 'customer'\n with self.assertRaises(SystemExit):\n horizontal_transform(frame, config, config_file_key)\n\n @parameterized.expand([\n ###\n (\n 'single_predicate',\n pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'age': [23, 67, 56]\n }),\n {\n 'files': {\n 'customer': {\n 'subject_fields': ['customer_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'age': 'int32'\n }\n }\n }\n },\n 'customer',\n pd.DataFrame(data={\n 'customer_id': pd.Series([1, 2, 3], dtype='int32'),\n 'predicate': pd.Series(['age']*3, dtype='O'),\n 'object': pd.Series([23, 67, 56], dtype='int32')\n })\n ),\n ###\n (\n 'multiple_predicates',\n pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'age': [23, 67, 56],\n 'weight': [189, 167, 190]\n }),\n {\n 'files': {\n 'customer': {\n 'subject_fields': ['customer_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'age': 'int32',\n 'weight': 'int32'\n }\n }\n }\n },\n 'customer',\n pd.DataFrame(data={\n 'customer_id': pd.Series([1, 2, 3, 1, 2, 3], dtype='int32'),\n 'predicate': pd.Series(['age']*3 + ['weight']*3, dtype='O'),\n 'object': pd.Series([23, 67, 56, 189, 167, 190], dtype='int32')\n })\n ),\n ###\n (\n 'multiple_subject_fields',\n pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'order_id': [405, 210, 321],\n 'value': [200, 321, 67],\n }),\n {\n 'files': {\n 'order': {\n 'subject_fields': ['customer_id', 'order_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'order_id': 'int32',\n 'value': 'int32'\n }\n }\n }\n },\n 'order',\n pd.DataFrame(data={\n 'customer_id': pd.Series([1, 2, 3], 
dtype='int32'),\n 'order_id': pd.Series([405, 210, 321], dtype='int32'),\n 'predicate': pd.Series(['value']*3, dtype='O'),\n 'object': pd.Series([200, 321, 67], dtype='int32')\n })\n )\n ])\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n def test_horizontal_melted_passed(self, name, frame, config, config_file_key, expected_melted, transform_mock: Mock):\n '''\n Ensures that the passed horizontal frame is melted and\n passed into the vertical_transform.\n Also ensures the same config and key are passed through\n '''\n intrinsic_mock = Mock(spec=pd.DataFrame)\n edges_mock = Mock(spec=pd.DataFrame)\n transform_mock.return_value = (intrinsic_mock, edges_mock)\n\n intrinsic, edges = horizontal_transform(frame, config, config_file_key)\n\n transform_mock.assert_called_once()\n args, kwargs = transform_mock.call_args_list[0]\n invoked_frame, invoked_config, invoked_key = args\n\n assert_frame_equal(invoked_frame, expected_melted)\n self.assertEqual(invoked_config, config)\n self.assertEqual(invoked_key, config_file_key)\n self.assertEqual(kwargs, {})\n self.assertEqual(intrinsic_mock, intrinsic)\n self.assertEqual(edges_mock, edges)\n\n def test_horizontal_frame_only_has_subject_and_no_data_fields(self):\n '''\n Ensures when the horizontal frame only has subject fields\n and no actual data fields then an error is raised\n '''\n frame = pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'order_id': [405, 210, 321]\n })\n\n config = {\n 'files': {\n 'order': {\n 'subject_fields': ['customer_id', 'order_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'order_id': 'int32',\n }\n }\n }\n }\n config_key = 'order'\n\n with self.assertRaises(ValueError):\n horizontal_transform(frame, config, config_key)\n\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n @patch('dgraphpandas.strategies.horizontal.pd.read_csv', spec=pd.read_csv)\n def test_horizontal_melted_file_path_passed(self, mock_pandas: Mock, mock_transform: Mock):\n '''\n Ensures when a file path(str) it passed into the transform, then the file\n is read using read_csv before going into logic.\n '''\n file = 'test.csv'\n frame = pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'age': [23, 67, 56]\n })\n config = {\n 'files': {\n 'customer': {\n 'subject_fields': ['customer_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'age': 'int32'\n }\n }\n }\n }\n config_file_key = 'customer'\n expected_melted = pd.DataFrame(data={\n 'customer_id': pd.Series([1, 2, 3], dtype='int32'),\n 'predicate': pd.Series(['age']*3, dtype='O'),\n 'object': pd.Series([23, 67, 56], dtype='int32')\n\n })\n\n mock_pandas.return_value = frame\n\n horizontal_transform(file, config, config_file_key)\n\n args, kwargs = mock_pandas.call_args_list[0]\n self.assertEqual(file, args[0])\n self.assertEqual({}, kwargs)\n\n args, kwargs = mock_transform.call_args_list[0]\n assert_frame_equal(expected_melted, args[0])\n self.assertEqual(config, args[1])\n self.assertEqual(config_file_key, args[2])\n\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n @patch('dgraphpandas.strategies.horizontal.pd.read_csv', spec=pd.read_csv)\n def test_horizontal_melted_file_path_custom_csv_passed(self, mock_pandas: Mock, mock_transform: Mock):\n '''\n Ensures when a read_csv_options option is defined inside file configuration\n it is applied to the pd.read_csv call.\n '''\n file = 'test.csv'\n read_csv_options = {'sep': ';'}\n frame = pd.DataFrame(data={\n 'customer_id': [1, 2, 3],\n 'age': [23, 67, 56]\n })\n config = {\n 'files': {\n 
'customer': {\n 'subject_fields': ['customer_id'],\n 'type_overrides': {\n 'customer_id': 'int32',\n 'age': 'int32'\n },\n 'read_csv_options': read_csv_options\n }\n }\n }\n config_file_key = 'customer'\n expected_melted = pd.DataFrame(data={\n 'customer_id': pd.Series([1, 2, 3], dtype='int32'),\n 'predicate': pd.Series(['age']*3, dtype='O'),\n 'object': pd.Series([23, 67, 56], dtype='int32')\n })\n\n mock_pandas.return_value = frame\n\n horizontal_transform(file, config, config_file_key)\n\n args, kwargs = mock_pandas.call_args_list[0]\n self.assertEqual(file, args[0])\n self.assertEqual(read_csv_options, kwargs)\n\n args, kwargs = mock_transform.call_args_list[0]\n assert_frame_equal(expected_melted, args[0])\n self.assertEqual(config, args[1])\n self.assertEqual(config_file_key, args[2])\n\n @parameterized.expand([\n ###\n (\n 'year_wrong_order',\n {'dob': {'format': \"%Y-%m-%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['03-02-2021', '01-03-1945'],\n 'weight': [50, 32]\n })\n ),\n ###\n (\n 'alphanumerical_string',\n {'dob': {'format': \"%Y-%m-%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['not a date', '01-03-1945'],\n 'weight': [50, 32]\n })\n ),\n ###\n (\n 'missing_dashes',\n {'dob': {'format': \"%Y-%m%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['2021-03-02', '19450301'],\n 'weight': [50, 32]\n })\n ),\n ###\n (\n 'missing_dots',\n {'dob': {'format': \"%Y.%m.%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['2021-03-02', '1945.03&01'],\n 'weight': [50, 32]\n })\n ),\n ###\n (\n 'malformed_month_string',\n {'dob': {'format': \"%d-%b-%Y\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['02-FebFake-2021', '01-Mar-1945'],\n 'weight': [50, 32]\n })\n )\n ])\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n def test_horizontal_transform_incorrect_date_format(self, name, date_format, frame, transform_mock: Mock):\n '''\n Ensures when the date format provided does not match the value within the frame,\n then an error is raised.\n '''\n config_file_key = 'customer'\n config = {\n 'files': {\n config_file_key: {\n 'subject_fields': ['customer_id'],\n 'date_fields': date_format\n }\n }\n }\n\n with self.assertRaisesRegex(ValueError, \"time data (.*) (doesn't|does not) match format(.*)\"):\n horizontal_transform(frame, config, config_file_key)\n transform_mock.assert_not_called()\n\n @parameterized.expand([\n ###\n (\n 'uncoverted_month_day',\n {'dob': {'format': \"%Y\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['2021-03-02', '1945-03-01'],\n 'weight': [50, 32]\n })\n ),\n ###\n (\n 'uncoverted_month_year',\n {'dob': {'format': \"%m-%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['03-02-2021', '03-01-2021'],\n 'weight': [50, 32]\n })\n )\n ])\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n def test_horizontal_transform_unconverted_date_parts(self, name, date_format, frame, transform_mock: Mock):\n '''\n Ensures when the date partially matches and there are some converted\n parts, an error is raised\n '''\n config_file_key = 'customer'\n config = {\n 'files': {\n config_file_key: {\n 'subject_fields': ['customer_id'],\n 'date_fields': date_format\n }\n }\n }\n\n with self.assertRaisesRegex(ValueError, \"unconverted data remains: (.*)\"):\n horizontal_transform(frame, config, config_file_key)\n transform_mock.assert_not_called()\n\n @parameterized.expand([\n ###\n (\n 'dash_format',\n {'dob': {'format': \"%Y-%m-%d\"}},\n pd.DataFrame(data={\n 
'customer_id': [1, 2],\n 'dob': ['2021-03-02', '1945-03-01'],\n 'weight': [50, 32]\n }),\n pd.DataFrame(data={\n 'customer_id': [1, 2, 1, 2],\n 'predicate': ['dob', 'dob', 'weight', 'weight'],\n 'object':[pd.to_datetime('2021-03-02 00:00:00'), pd.to_datetime('1945-03-01 00:00:00'), 50, 32]\n })\n ),\n ###\n (\n 'dot_format',\n {'dob': {'format': \"%Y.%m.%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['1999.05.09', '1789.02.12'],\n 'weight': [50, 32]\n }),\n pd.DataFrame(data={\n 'customer_id': [1, 2, 1, 2],\n 'predicate': ['dob', 'dob', 'weight', 'weight'],\n 'object': [pd.to_datetime('1999-05-09 00:00:00'), pd.to_datetime('1789-02-12 00:00:00'), 50, 32]\n })\n ),\n ###\n (\n 'multiple_date_fields',\n {'updated_at': {'format': '%Y.%m.%d'}, 'dob': {'format': \"%Y.%m.%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['1999.05.09', '1789.02.12'],\n 'updated_at': ['2021.03.02', '2021.03.04'],\n 'weight': [50, 32]\n }),\n pd.DataFrame(data={\n 'customer_id': [1, 2, 1, 2, 1, 2],\n 'predicate': ['dob', 'dob', 'updated_at', 'updated_at', 'weight', 'weight'],\n 'object': [\n pd.to_datetime('1999-05-09 00:00:00'),\n pd.to_datetime('1789-02-12 00:00:00'),\n pd.to_datetime('2021-03-02 00:00:00'),\n pd.to_datetime('2021-03-04 00:00:00'),\n 50,\n 32]\n })\n ),\n ###\n (\n 'multiple_date_fields_different_formats',\n {'updated_at': {'format': '%Y$%m$%d'}, 'dob': {'format': \"%Y.%m.%d\"}},\n pd.DataFrame(data={\n 'customer_id': [1, 2],\n 'dob': ['1999.05.09', '1789.02.12'],\n 'updated_at': ['2021$03$02', '2021$03$04'],\n 'weight': [50, 32]\n }),\n pd.DataFrame(data={\n 'customer_id': [1, 2, 1, 2, 1, 2],\n 'predicate': ['dob', 'dob', 'updated_at', 'updated_at', 'weight', 'weight'],\n 'object': [\n pd.to_datetime('1999-05-09 00:00:00'),\n pd.to_datetime('1789-02-12 00:00:00'),\n pd.to_datetime('2021-03-02 00:00:00'),\n pd.to_datetime('2021-03-04 00:00:00'),\n 50,\n 32]\n })\n )\n ])\n @patch('dgraphpandas.strategies.horizontal.vertical_transform')\n def test_horizontal_transform_correct_date_format(self, name, date_format, frame, expected_melted, transform_mock: Mock):\n '''\n Ensures when the date_format provided is in the correct format,\n no error is raised\n '''\n config_file_key = 'customer'\n config = {\n 'files': {\n config_file_key: {\n 'subject_fields': ['customer_id'],\n 'date_fields': date_format\n }\n }\n }\n\n horizontal_transform(frame, config, config_file_key)\n\n transform_mock.assert_called_once()\n args, kwargs = transform_mock.call_args_list[0]\n\n passed_frame, passed_config, passed_config_key = args\n\n assert_frame_equal(passed_frame, expected_melted)\n self.assertEqual(passed_config, config)\n self.assertEqual(passed_config_key, config_file_key)\n self.assertEqual(kwargs, {})\n"
] |
[
[
"pandas.to_datetime",
"pandas.testing.assert_frame_equal",
"pandas.Series",
"pandas.DataFrame"
]
] |
jjimmykang/bwsi-backprojection
|
[
"440e21f90e2a1d0d1c28bfd9a0faaf97129378a5"
] |
[
"legacy/main_code/pulson440/unpack_batch.py"
] |
[
    "import numpy as np\nfrom control import unpack\nimport argparse\nimport pickle\n\n\ndef main():\n parser = argparse.ArgumentParser(description='batch unpacks data')\n parser.add_argument('data_dir', type=str, help='the directory for all of the scan datas')\n parser.add_argument('num_files', type=int, help='the number of files')\n args = parser.parse_args()\n\n data = []\n for i in range(1, args.num_files + 1):\n data.append(unpack(args.data_dir + '/scan_' + str(i) + '.txt'))\n\n np_data = np.asarray(data)\n print(list(data[0].keys()))\n\n master_dict = {'scan_data': [], 'timestamps': [], 'pulse_idx': [], 'range_bins': [], 'packet_idx': [], 'config': []}\n for i in data:\n master_dict['scan_data'].append(i['scan_data'])\n master_dict['timestamps'].append(i['timestamps'])\n master_dict['pulse_idx'].append(i['pulse_idx'])\n master_dict['range_bins'].append(i['range_bins'])\n master_dict['packet_idx'].append(i['packet_idx'])\n master_dict['config'].append(i['config'])\n\n # persist the combined scans with pickle; the output filename is an assumed choice\n with open(args.data_dir + '/master_dict.pkl', 'wb') as f:\n pickle.dump(master_dict, f)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.asarray"
]
] |
KiloSat/ML_Algos
|
[
"4375bdadeca91a8bfa5ca9dedf6202d3252b96d7"
] |
[
"ML_Algos/apps/linear_regression_2.py"
] |
[
"import streamlit as st \r\n\r\ndef app():\r\n\tst.write(\"Predicting House Prices with multiple variables\")\r\n\timport pandas as pd\r\n\timport matplotlib.pyplot as plt\r\n\timport numpy as np\r\n\tfrom sklearn import linear_model\r\n\r\n\tdata = pd.read_csv('housing.csv')\r\n\tdata = data[:50] #taking just 50 rows from the excel file\r\n\tst.subheader('Contents of the Housing Excel File')\r\n\tst.write(data)\r\n\r\n\r\n\tst.subheader('Choosing the variables which best correspond to median_house_value')\r\n\r\n\tplt.xlabel('households')\r\n\tplt.ylabel('median_house_value (in dollars)')\r\n\tplt.scatter(data.households,data.median_house_value)\r\n\tst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\tst.pyplot()\r\n\r\n\tplt.xlabel('total rooms')\r\n\tplt.ylabel('median_house_value (in dollars)')\r\n\tplt.scatter(data.total_rooms,data.median_house_value)\r\n\tst.pyplot()\r\n\r\n\r\n\tplt.xlabel('median_income')\r\n\tplt.ylabel('median_house_value (in dollars)')\r\n\tplt.scatter(data.median_income,data.median_house_value)\r\n\tst.pyplot()\r\n\r\n\tst.write('As the plot above indicates, there is a strong correlation between the above three predictor variables and our target variable')\r\n\t\r\n\tst.subheader('We will now use these to train our model')\r\n\r\n\tmodel = linear_model.LinearRegression() #loading the model from the library\r\n\tmodel.fit(data[['median_income','total_rooms','households']],data.median_house_value)\r\n\r\n\tst.subheader('We have trained our model')\r\n\r\n\tst.subheader('We will predict the house prices for values 7.2574,1467,177 for the 3 predictors respectively')\r\n\tst.subheader('Prediction : {}'.format(model.predict([[7.2574,1467,177]]))) #I took values from row 3 of our dataframes :)\r\n\r\n\tst.subheader(\"We will now see the relation between our 3-dimensional prediction and each of the predictor variables\")\r\n\t\r\n\tplt.xlabel('households')\r\n\tplt.ylabel('median_house_value(in dollars)')\r\n\tplt.scatter(data.households,data.median_house_value)\r\n\tplt.plot(data.households,model.predict(data[['median_income','total_rooms','households']]),color='red')\r\n\tst.pyplot()\r\n\r\n\tplt.xlabel('median income(in 10 thousand dollars)')\r\n\tplt.ylabel('median house value(in dollars)')\r\n\tplt.scatter(data.median_income,data.median_house_value)\r\n\tplt.plot(data.median_income,model.predict(data[['median_income','total_rooms','households']]),color='red') #plotting our model predicted line\r\n\tst.pyplot()\r\n\r\n\tplt.xlabel('total rooms')\r\n\tplt.ylabel('median house value(in dollars)')\r\n\tplt.scatter(data.total_rooms,data.median_house_value)\r\n\tplt.plot(data.total_rooms,model.predict(data[['median_income','total_rooms','households']]),color='red') #plotting our model predicted line\r\n\tst.pyplot()"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
trevorki/dsci-532_group-20
|
[
"6bba74c08a0b66c4f7986c20a05b4a2dfce7f279"
] |
[
"src/hotel_cleaner.py"
] |
[
"# hotel_cleaner.py\n# author: Trevor Kinsey\n# date: 2021-01-19\n\n'''This script cleans and wrangles the hotels.csv file to be used\n in a visualization dashboard app\n\nUsage: python hotel_cleaner.py\n\n'''\nimport numpy as np\nimport pandas as pd\n\n# import the data\nhotels = pd.read_csv(\"data/raw/hotels.csv\")\n\n# create new columns from other columns\nmonths = [\"January\", \"February\", \"March\", \"April\",\n \"May\", \"June\", \"July\", \"August\", \"September\", \n \"October\", \"November\", \"December\"]\nhotels[\"arrival_date_month\"] = hotels[\"arrival_date_month\"].replace(months,[1,2,3,4,5,6,7,8,9,10,11,12])\nhotels[\"Arrival date\"] = pd.to_datetime(hotels.arrival_date_year*10000 + hotels.arrival_date_month*100 + hotels.arrival_date_day_of_month, \n format = '%Y%m%d')\nhotels[\"Arrival day of week\"] = hotels[\"Arrival date\"].dt.dayofweek\nhotels[\"Arrival day of week\"] = hotels[\"Arrival day of week\"].replace([0,1,2,3,4,5,6],[\"Mon\", \"Tues\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"])\nhotels[\"Total nights\"] = hotels[\"stays_in_weekend_nights\"] + hotels[\"stays_in_week_nights\"]\n# drop unused columns\nhotels = hotels.drop(columns=['agent', 'company', 'lead_time',\n 'market_segment', 'distribution_channel',\n 'is_repeated_guest', 'previous_cancellations',\n 'previous_bookings_not_canceled', 'reserved_room_type',\n 'assigned_room_type','deposit_type',\n 'days_in_waiting_list', 'customer_type', 'reservation_status', \n 'reservation_status_date', 'meal'], )\n# Change values to make more readable\nhotels[\"hotel\"] = hotels[\"hotel\"].replace([\"Resort Hotel\", \"City Hotel\"], [\"Resort\", \"City\"])\n# change column names to make more readable\nhotels.columns = ['Hotel type', 'Cancelled', 'Arrival year',\n 'Arrival month', 'Arrival week', 'Arrival day', 'Weekend nights', \n 'Week nights', 'Adults', 'Children', 'Babies', 'Country of origin', \n 'Booking changes', 'Average daily rate', 'Required parking spaces', \n 'Special requests', \"Arrival date\", \"Arrival day of week\", 'Total nights']\n \n# # create date index column for date slider\n# hotels[\"arrival_date_index\"] = pd.to_numeric(hotels[\"Arrival date\"])/60/60/24/10**9\n# start_date = hotels[\"arrival_date_index\"][0]\n# hotels[\"arrival_date_index\"] -= start_date\n\n# save to file\nhotels.to_csv(\"data/processed/clean_hotels.csv\", index = False)"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
i5misswrong/CA_RL
|
[
"123df432276e3718e8287ee0835c9a0ba5fa9698"
] |
[
"tensorflow_book/MNIST_Learning/cnn_view_01.py"
] |
[
"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n# GPU参数配置\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\n\r\ntrain_epochs = 100 # 训练轮数\r\nbatch_size = 100 # 随机出去数据大小\r\ndisplay_step = 1 # 显示训练结果的间隔\r\nlearning_rate = 0.0001 # 学习效率\r\ndrop_prob = 0.5 # 正则化,丢弃比例\r\nfch_nodes = 512 # 全连接隐藏层神经元的个数\r\n\r\n\r\n# 权重初始化\r\ndef weight_init(shape):\r\n weights = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)\r\n return tf.Variable(weights)\r\n\r\n\r\n# 偏置初始化\r\ndef biases_init(shape):\r\n biases = tf.random_normal(shape, dtype=tf.float32)\r\n return tf.Variable(biases)\r\n\r\n\r\n# 随机选取bitchj\r\ndef get_random_batchData(n_samples, batchsize):\r\n start_index = np.random.randint(0, n_samples - batch_size)\r\n return (start_index, start_index + batch_size)\r\n\r\n\r\n# 全链接层 权重初始化\r\ndef xavier_init(layer1, layer2, constant=1):\r\n Min = -constant * np.sqrt(6.0 / (layer1 + layer2))\r\n Max = constant * np.sqrt(6.0 / (layer1 + layer2))\r\n return tf.Variable(tf.random_uniform((layer1, layer2), minval=Min, maxval=Max, dtype=tf.float32))\r\n\r\n\r\n# 卷积\r\ndef conv2d(x, w):\r\n # x为图形像素 w为卷积核\r\n return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')\r\n\r\n\r\n# 池化\r\ndef max_pool_2x2(x):\r\n # x是卷积后 经过激活函数后的图像 ksize的池化滑动张量 ksize 的维度[batch, height, width, channels],跟 x 张量相同\r\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n\r\n# 创建占位符 x是图像 y是标签!!!\r\nx = tf.placeholder(tf.float32, [None, 784])\r\ny = tf.placeholder(tf.float32, [None, 10])\r\n# 将一维图像转化成二维\r\nx_img = tf.reshape(x, [-1, 28, 28, 1])\r\n\r\n# 第一层卷积+池化\r\nw_conv1 = weight_init([5, 5, 1, 16])\r\nb_conv1 = biases_init([16])\r\nh_conv1 = tf.nn.relu(conv2d(x_img, w_conv1) + b_conv1)\r\nh_pool1 = max_pool_2x2(h_conv1)\r\n\r\n# 第二层卷积+池化\r\nw_conv2 = weight_init([5, 5, 16, 32])\r\nb_conv2 = biases_init([32])\r\nh_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)\r\nh_pool2 = max_pool_2x2(h_conv1)\r\n\r\n# 全连接层\r\nh_fpool2 = tf.reshape(h_pool2, [-1, 7 * 7 * 32])\r\n\r\nw_fc1 = xavier_init(7 * 7 * 32, fch_nodes)\r\nb_fc1 = biases_init([fch_nodes])\r\nh_fc1 = tf.nn.relu(tf.matmul(h_fpool2, w_fc1) + b_fc1)\r\n\r\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=drop_prob)\r\n\r\nw_fc2 = xavier_init(fch_nodes, 10)\r\nb_fc2 = biases_init([10])\r\n\r\ny_ = tf.add(tf.matmul(h_fc1_drop, w_fc2), b_fc2)\r\ny_out = tf.nn.softmax(y_)\r\n\r\n# 交叉熵代价函数\r\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_out), reduction_indices=[1]))\r\n\r\n#\r\noptimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_out, 1))\r\naccutacy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\nn_samples = int(mnist.train.num_examples)\r\ntotal_batches = int(n_samples / batch_size)\r\n\r\nwith tf.Session(config=config) as sess:\r\n sess.run(init)\r\n # Cost=[]\r\n # Accuracy=[]\r\n # for i in range(train_epochs):\r\n # for j in range(100):\r\n # start_index,end_index=get_random_batchData(n_samples,batch_size)\r\n # batch_x=mnist.train.images[start_index:end_index]\r\n # batch_y=mnist.train.labels[start_index:end_index]\r\n # sess.run([optimizer,cross_entropy,accutacy],feed_dict={x:batch_x,y:batch_y})\r\n # Cost.append()\r\n #\r\n #\r\n\r\n 
iput_imgae=mnist.train.images[0]\r\n iput_imgae_2=mpimg.imread('111.png')\r\n iput_imgae_2.shape\r\n plt.imshow(iput_imgae_2)\r\n plt.show()\r\n # conv1_16=sess.run(h_conv1,feed_dict={x:iput_imgae})\r\n # conv1_traonspose=sess.run(tf.transpose(conv1_16,[3,0,1,2]))\r\n # fig,ax=plt.subplot(rows=1,cols=16,figzise=(16,1))\r\n # for i in range(16):\r\n # ax[i].imshow(conv1_traonspose[i][0])\r\n # print(len(iput_imgae))\r\n # img_shape=iput_imgae.reshape(28,28)\r\n # # for i in iput_imgae[3][0]:\r\n # # print(i)\r\n # # plt.imshow(conv1_traonspose[3][0])\r\n # plt.imshow(img_shape)\r\n # plt.show()"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"matplotlib.image.imread",
"tensorflow.train.AdamOptimizer",
"numpy.random.randint",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.show",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.log",
"tensorflow.random_uniform",
"tensorflow.random_normal"
]
] |
binzhihao/py-ai-project
|
[
"64491f6e5ff2007fd7abad5ee0c6058a914d1f84"
] |
[
"nn/tf_fnn_bn.py"
] |
[
"from functools import partial\nimport tensorflow as tf\nimport numpy as np\nimport data.mnist.loader as loader\n\nOPTIMIZER = 'adam'\n\nif __name__ == '__main__':\n\n n_inputs = 28 * 28\n n_hidden1 = 300\n n_hidden2 = 100\n n_outputs = 10\n\n learning_rate = 0.01\n n_epochs = 40\n batch_size = 200\n batch_norm_momentum = 0.9\n\n def shuffle_batch(X, y, size):\n rnd_idx_ = np.random.permutation(len(X))\n n_batches_ = len(X) // size\n for batch_idx_ in np.array_split(rnd_idx_, n_batches_):\n X_batch_, y_batch_ = X[batch_idx_], y[batch_idx_]\n yield X_batch_, y_batch_\n\n # prepare data\n X_train, y_train = loader.load_mnist(kind='train')\n X_train = X_train.astype(np.float32) / 255.0\n y_train = y_train.astype(np.int32)\n\n X_test, y_test = loader.load_mnist(kind='test')\n X_test = X_test.astype(np.float32) / 255.0\n y_test = y_test.astype(np.int32)\n\n X_valid, X_train = X_train[:5000], X_train[5000:]\n y_valid, y_train = y_train[:5000], y_train[5000:]\n\n # placeholder\n X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')\n y = tf.placeholder(tf.int64, shape=None, name='y')\n # 给Batch norm加一个placeholder\n training = tf.placeholder_with_default(False, shape=(), name='training')\n\n with tf.name_scope(\"dnn\"):\n he_init = tf.contrib.layers.variance_scaling_initializer()\n my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=batch_norm_momentum)\n my_dense_layer = partial(tf.layers.dense, kernel_initializer=he_init)\n\n hidden1 = my_dense_layer(X, n_hidden1, name='hidden1')\n bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))\n hidden2 = my_dense_layer(bn1, n_hidden2, name='hidden2')\n bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))\n logists_before_bn = my_dense_layer(bn2, n_outputs, name='outputs')\n logists = my_batch_norm_layer(logists_before_bn)\n\n with tf.name_scope('loss'):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = y, logits= logists)\n loss = tf.reduce_mean(xentropy, name='loss')\n\n with tf.name_scope('train'):\n if OPTIMIZER == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n else:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n training_op = optimizer.minimize(loss)\n\n with tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(logists, y, 1)\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\n # create variables initializer\n init = tf.global_variables_initializer()\n # create a saver\n saver = tf.train.Saver()\n\n # 注意:由于我们使用的是 tf.layers.batch_normalization() 而不是 tf.contrib.layers.batch_norm()(如本书所述),\n # 所以我们需要明确运行批量规范化所需的额外更新操作(sess.run([ training_op,extra_update_ops], ...)。\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n X_batch = None\n y_batch = None\n for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n sess.run([training_op, extra_update_ops], feed_dict={training: True, X: X_batch, y: y_batch})\n acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})\n print(epoch, \"Batch accuracy:\", acc_batch, \"Val accuracy:\", acc_val)\n"
] |
[
[
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.reduce_mean",
"tensorflow.get_collection",
"tensorflow.placeholder_with_default",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"tensorflow.nn.in_top_k",
"numpy.array_split"
]
] |
artzaza7/WebAppZescampGroup_9
|
[
"fa6ae018db399104af1a4837046d519ba74aad93"
] |
[
"Test.py"
] |
[
"from scipy.spatial import distance as dist\nimport numpy as np\nimport imutils\nimport cv2\nimport os\n\n# base path to YOLO directory\nMODEL_PATH = \"yolo-coco\"\n\n# initialize minimum probability to filter weak detections along with\n# the threshold when applying non-maxima suppression\nMIN_CONF = 0.3\nNMS_THRESH = 0.3\n\n# boolean indicating if NVIDIA CUDA GPU should be used\nUSE_GPU = False\n\n# define the minimum safe distance (in pixels) that two people can be\n# from each other\nMIN_DISTANCE = 50\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([MODEL_PATH, \"obj.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\nprint(LABELS)\n\nprint(len(LABELS))\n\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([MODEL_PATH, \"yolov4.weights\"])\nconfigPath = os.path.sep.join([MODEL_PATH, \"yolov4.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\n\n# check if we are going to use GPU\nif USE_GPU:\n # set CUDA as the preferable backend and target\n print(\"Setting preferable backend and target to CUDA...\")\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\n# determine only the *output* layer names that we need from YOLO\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\nprint(\"Accessing video stream...\")\n#upload the video file you want to check social distancing for- below.\nvs = cv2.VideoCapture(\"pedestrians.mp4\")\n#src.upload(vs)\nfps = vs.get(cv2.CAP_PROP_FPS)\nprint(\"FPS of the current video: \",fps)\n\nnum_frames = vs.get(cv2.CAP_PROP_FRAME_COUNT)\nprint(\"Number of frames in the video: \",num_frames)\n\nwriter = None\n\ndef detect_people(frame, net, ln, personIdx=0):\n # grab the dimensions of the frame and initialize the list of\n # results\n (H, W) = frame.shape[:2]\n results = []\n\n # construct a blob from the input frame and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes\n # and associated probabilities\n blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n net.setInput(blob)\n layerOutputs = net.forward(ln)\n\n # initialize our lists of detected bounding boxes, centroids, and\n # confidences, respectively\n boxes = []\n centroids = []\n confidences = []\n\n # loop over each of the layer outputs\n for output in layerOutputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability)\n # of the current object detection\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n\n # filter detections by (1) ensuring that the object\n # detected was a person and (2) that the minimum\n # confidence is met\n if classID == personIdx and confidence > MIN_CONF:\n # scale the bounding box coordinates back relative to\n # the size of the image, keeping in mind that YOLO\n # actually returns the center (x, y)-coordinates of\n # the bounding box followed by the boxes' width and\n # height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # use the center (x, y)-coordinates to derive the top\n # and and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of 
bounding box coordinates,\n # centroids, and confidences\n boxes.append([x, y, int(width), int(height)])\n centroids.append((centerX, centerY))\n confidences.append(float(confidence))\n\n # apply non-maxima suppression to suppress weak, overlapping\n # bounding boxes\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONF, NMS_THRESH)\n\n # ensure at least one detection exists\n if len(idxs) > 0:\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n # extract the bounding box coordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n\n # update our results list to consist of the person\n # prediction probability, bounding box coordinates,\n # and the centroid\n r = (confidences[i], (x, y, x + w, y + h), centroids[i])\n results.append(r)\n\n # return the list of results\n return results\n\ndisplay = 1\noutput = \"Output_Art_file.avi\"\n\nwhile True:\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n # resize the frame and then detect people (and only people) in it\n frame = imutils.resize(frame, width=700)\n results = detect_people(frame, net, ln, personIdx=LABELS.index(\"person\"))\n\n # initialize the set of indexes that violate the minimum social\n # distance\n violate = set()\n\n # ensure there are *at least* two people detections (required in\n # order to compute our pairwise distance maps)\n if len(results) >= 2:\n # extract all centroids from the results and compute the\n # Euclidean distances between all pairs of the centroids\n centroids = np.array([r[2] for r in results])\n D = dist.cdist(centroids, centroids, metric=\"euclidean\")\n\n # loop over the upper triangular of the distance matrix\n for i in range(0, D.shape[0]):\n for j in range(i + 1, D.shape[1]):\n # check to see if the distance between any two\n # centroid pairs is less than the configured number\n # of pixels\n if D[i, j] < MIN_DISTANCE:\n # update our violation set with the indexes of\n # the centroid pairs\n violate.add(i)\n violate.add(j)\n\n # loop over the results\n for (i, (prob, bbox, centroid)) in enumerate(results):\n # extract the bounding box and centroid coordinates, then\n # initialize the color of the annotation\n (startX, startY, endX, endY) = bbox\n (cX, cY) = centroid\n color = (0, 255, 0)\n\n # if the index pair exists within the violation set, then\n # update the color\n if i in violate:\n color = (0, 0, 255)\n\n # draw (1) a bounding box around the person and (2) the\n # centroid coordinates of the person,\n cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n cv2.circle(frame, (cX, cY), 5, color, 1)\n\n # draw the total number of social distancing violations on the\n # output frame\n text = \"Social Distancing Violations: {}\".format(len(violate))\n cv2.putText(frame, text, (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)\n\n # check to see if the output frame should be displayed to our\n # screen\n if display > 0:\n # show the output frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # if an output video file path has been supplied and the video\n # writer has not been initialized, do so now\n if output != \"\" and writer is None:\n # initialize our video writer\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(output, fourcc, 25, (frame.shape[1], 
frame.shape[0]), True)\n\n # if the video writer is not None, write the frame to the output\n # video file\n if writer is not None:\n writer.write(frame)"
] |
[
[
"numpy.array",
"numpy.argmax",
"scipy.spatial.distance.cdist"
]
] |
ramkpari/mne-python
|
[
"7b428b96c2a719902ae72a498618062f879d674c"
] |
[
"mne/viz/backends/_notebook.py"
] |
[
"\"\"\"Notebook implementation of _Renderer and GUI.\"\"\"\n\n# Authors: Guillaume Favelier <[email protected]>\n#\n# License: Simplified BSD\n\nfrom contextlib import contextmanager\nfrom IPython.display import display\nfrom ipywidgets import (Button, Dropdown, FloatSlider, FloatText, HBox,\n IntSlider, IntText, Text, VBox, IntProgress)\n\nfrom ...fixes import nullcontext\nfrom ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar,\n _AbstractStatusBar, _AbstractLayout, _AbstractWidget,\n _AbstractWindow, _AbstractMplCanvas, _AbstractPlayback,\n _AbstractBrainMplCanvas, _AbstractMplInterface)\nfrom ._pyvista import _PyVistaRenderer, _close_all, _set_3d_view, _set_3d_title # noqa: F401,E501, analysis:ignore\n\n\nclass _IpyLayout(_AbstractLayout):\n def _layout_initialize(self, max_width):\n self._layout_max_width = max_width\n\n def _layout_add_widget(self, layout, widget, stretch=0):\n widget.layout.margin = \"2px 0px 2px 0px\"\n widget.layout.min_width = \"0px\"\n children = list(layout.children)\n children.append(widget)\n layout.children = tuple(children)\n # Fix columns\n if self._layout_max_width is not None and isinstance(widget, HBox):\n children = widget.children\n width = int(self._layout_max_width / len(children))\n for child in children:\n child.layout.width = f\"{width}px\"\n\n\nclass _IpyDock(_AbstractDock, _IpyLayout):\n def _dock_initialize(self, window=None):\n self._dock_width = 300\n self._dock = self._dock_layout = VBox()\n self._dock.layout.width = f\"{self._dock_width}px\"\n self._layout_initialize(self._dock_width)\n\n def _dock_finalize(self):\n pass\n\n def _dock_show(self):\n self._dock_layout.layout.visibility = \"visible\"\n\n def _dock_hide(self):\n self._dock_layout.layout.visibility = \"hidden\"\n\n def _dock_add_stretch(self, layout):\n pass\n\n def _dock_add_layout(self, vertical=True):\n return VBox() if vertical else HBox()\n\n def _dock_add_label(self, value, align=False, layout=None):\n layout = self._dock_layout if layout is None else layout\n widget = Text(value=value, disabled=True)\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_button(self, name, callback, layout=None):\n widget = Button(description=name)\n widget.on_click(lambda x: callback())\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_named_layout(self, name, layout, compact):\n layout = self._dock_layout if layout is None else layout\n if name is not None:\n hlayout = self._dock_add_layout(not compact)\n self._dock_add_label(\n value=name, align=not compact, layout=hlayout)\n self._layout_add_widget(layout, hlayout)\n layout = hlayout\n return layout\n\n def _dock_add_slider(self, name, value, rng, callback,\n compact=True, double=False, layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n klass = FloatSlider if double else IntSlider\n widget = klass(\n value=value,\n min=rng[0],\n max=rng[1],\n readout=False,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_spin_box(self, name, value, rng, callback,\n compact=True, double=True, layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n klass = FloatText if double else IntText\n widget = klass(\n value=value,\n min=rng[0],\n max=rng[1],\n readout=False,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def 
_dock_add_combo_box(self, name, value, rng,\n callback, compact=True, layout=None):\n layout = self._dock_named_layout(name, layout, compact)\n widget = Dropdown(\n value=value,\n options=rng,\n )\n widget.observe(_generate_callback(callback), names='value')\n self._layout_add_widget(layout, widget)\n return _IpyWidget(widget)\n\n def _dock_add_group_box(self, name, layout=None):\n layout = self._dock_layout if layout is None else layout\n hlayout = VBox()\n self._layout_add_widget(layout, hlayout)\n return hlayout\n\n\ndef _generate_callback(callback, to_float=False):\n def func(data):\n value = data[\"new\"] if \"new\" in data else data[\"old\"]\n callback(float(value) if to_float else value)\n return func\n\n\nclass _IpyToolBar(_AbstractToolBar, _IpyLayout):\n def _tool_bar_load_icons(self):\n self.icons = dict()\n self.icons[\"help\"] = \"question\"\n self.icons[\"play\"] = None\n self.icons[\"pause\"] = None\n self.icons[\"reset\"] = \"history\"\n self.icons[\"scale\"] = \"magic\"\n self.icons[\"clear\"] = \"trash\"\n self.icons[\"movie\"] = \"video-camera\"\n self.icons[\"restore\"] = \"replay\"\n self.icons[\"screenshot\"] = \"camera\"\n self.icons[\"visibility_on\"] = \"eye\"\n self.icons[\"visibility_off\"] = \"eye\"\n\n def _tool_bar_initialize(self, name=\"default\", window=None):\n self.actions = dict()\n self._tool_bar = self._tool_bar_layout = HBox()\n self._layout_initialize(None)\n\n def _tool_bar_add_button(self, name, desc, func, icon_name=None,\n shortcut=None):\n icon_name = name if icon_name is None else icon_name\n icon = self.icons[icon_name]\n if icon is None:\n return\n widget = Button(tooltip=desc, icon=icon)\n widget.on_click(lambda x: func())\n self._layout_add_widget(self._tool_bar_layout, widget)\n self.actions[name] = widget\n\n def _tool_bar_update_button_icon(self, name, icon_name):\n self.actions[name].icon = self.icons[icon_name]\n\n def _tool_bar_add_text(self, name, value, placeholder):\n widget = Text(value=value, placeholder=placeholder)\n self._layout_add_widget(self._tool_bar_layout, widget)\n self.actions[name] = widget\n\n def _tool_bar_add_spacer(self):\n pass\n\n def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):\n def callback():\n fname = self.actions[f\"{name}_field\"].value\n func(None if len(fname) == 0 else fname)\n self._tool_bar_add_text(\n name=f\"{name}_field\",\n value=None,\n placeholder=\"Type a file name\",\n )\n self._tool_bar_add_button(\n name=name,\n desc=desc,\n func=callback,\n )\n\n def _tool_bar_set_theme(self, theme):\n pass\n\n\nclass _IpyMenuBar(_AbstractMenuBar):\n def _menu_initialize(self, window=None):\n pass\n\n def _menu_add_submenu(self, name, desc):\n pass\n\n def _menu_add_button(self, menu_name, name, desc, func):\n pass\n\n\nclass _IpyStatusBar(_AbstractStatusBar, _IpyLayout):\n def _status_bar_initialize(self, window=None):\n self._status_bar = self._status_bar_layout = HBox()\n self._layout_initialize(None)\n\n def _status_bar_add_label(self, value, stretch=0):\n widget = Text(value=value, disabled=True)\n self._layout_add_widget(self._status_bar_layout, widget)\n return _IpyWidget(widget)\n\n def _status_bar_add_progress_bar(self, stretch=0):\n widget = IntProgress()\n self._layout_add_widget(self._status_bar_layout, widget)\n return _IpyWidget(widget)\n\n def _status_bar_update(self):\n pass\n\n\nclass _IpyPlayback(_AbstractPlayback):\n def _playback_initialize(self, func, timeout):\n pass\n\n\nclass _IpyMplInterface(_AbstractMplInterface):\n def _mpl_initialize(self):\n from 
matplotlib.backends.backend_nbagg import (FigureCanvasNbAgg,\n FigureManager)\n self.canvas = FigureCanvasNbAgg(self.fig)\n self.manager = FigureManager(self.canvas, 0)\n\n\nclass _IpyMplCanvas(_AbstractMplCanvas, _IpyMplInterface):\n def __init__(self, width, height, dpi):\n super().__init__(width, height, dpi)\n self._mpl_initialize()\n\n\nclass _IpyBrainMplCanvas(_AbstractBrainMplCanvas, _IpyMplInterface):\n def __init__(self, brain, width, height, dpi):\n super().__init__(brain, width, height, dpi)\n self._mpl_initialize()\n self._connect()\n\n\nclass _IpyWindow(_AbstractWindow):\n def _window_close_connect(self, func):\n pass\n\n def _window_get_dpi(self):\n return 96\n\n def _window_get_size(self):\n return self.figure.plotter.window_size\n\n def _window_get_simple_canvas(self, width, height, dpi):\n return _IpyMplCanvas(width, height, dpi)\n\n def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,\n separate_canvas):\n w, h = self._window_get_mplcanvas_size(interactor_fraction)\n self._interactor_fraction = interactor_fraction\n self._show_traces = show_traces\n self._separate_canvas = separate_canvas\n self._mplcanvas = _IpyBrainMplCanvas(\n brain, w, h, self._window_get_dpi())\n return self._mplcanvas\n\n def _window_adjust_mplcanvas_layout(self):\n pass\n\n def _window_get_cursor(self):\n pass\n\n def _window_set_cursor(self, cursor):\n pass\n\n def _window_new_cursor(self, name):\n pass\n\n @contextmanager\n def _window_ensure_minimum_sizes(self):\n yield\n\n def _window_set_theme(self, theme):\n pass\n\n\nclass _IpyWidget(_AbstractWidget):\n def set_value(self, value):\n self._widget.value = value\n\n def get_value(self):\n return self._widget.value\n\n def set_range(self, rng):\n self._widget.min = rng[0]\n self._widget.max = rng[1]\n\n def show(self):\n self._widget.layout.visibility = \"visible\"\n\n def hide(self):\n self._widget.layout.visibility = \"hidden\"\n\n def update(self, repaint=True):\n pass\n\n\nclass _Renderer(_PyVistaRenderer, _IpyDock, _IpyToolBar, _IpyMenuBar,\n _IpyStatusBar, _IpyWindow, _IpyPlayback):\n def __init__(self, *args, **kwargs):\n self._dock = None\n self._tool_bar = None\n self._status_bar = None\n kwargs[\"notebook\"] = True\n super().__init__(*args, **kwargs)\n\n def _update(self):\n if self.figure.display is not None:\n self.figure.display.update_canvas()\n\n def _create_default_tool_bar(self):\n self._tool_bar_load_icons()\n self._tool_bar_initialize()\n self._tool_bar_add_file_button(\n name=\"screenshot\",\n desc=\"Take a screenshot\",\n func=self.screenshot,\n )\n\n def show(self):\n # default tool bar\n if self._tool_bar is None:\n self._create_default_tool_bar()\n display(self._tool_bar)\n # viewer\n try:\n viewer = self.plotter.show(\n use_ipyvtk=True, return_viewer=True)\n except RuntimeError:\n viewer = self.plotter.show(\n backend=\"ipyvtk_simple\", return_viewer=True)\n viewer.layout.width = None # unlock the fixed layout\n # main widget\n if self._dock is None:\n main_widget = viewer\n else:\n main_widget = HBox([self._dock, viewer])\n display(main_widget)\n self.figure.display = viewer\n # status bar\n if self._status_bar is not None:\n display(self._status_bar)\n return self.scene()\n\n\n_testing_context = nullcontext\n"
] |
[
[
"matplotlib.backends.backend_nbagg.FigureCanvasNbAgg",
"matplotlib.backends.backend_nbagg.FigureManager"
]
] |
raunakkmr/Graph-Attention-Networks
|
[
"276e3a1dfb4f9b50944009f796b12b546d05136a"
] |
[
"src/main.py"
] |
[
"from math import ceil\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom datasets import node_classification\nimport models\nimport utils\n\ndef main():\n config = utils.parse_args()\n\n if config['cuda'] and torch.cuda.is_available():\n device = 'cuda:0'\n else:\n device = 'cpu'\n\n dataset_args = (config['task'], config['dataset'], config['dataset_path'],\n 'train', config['num_layers'], config['self_loop'],\n config['normalize_adj'], config['transductive'])\n dataset = utils.get_dataset(dataset_args)\n loader = DataLoader(dataset=dataset, batch_size=config['batch_size'],\n shuffle=True, collate_fn=dataset.collate_wrapper)\n input_dim, output_dim = dataset.get_dims()\n\n model = models.GAT(input_dim, config['hidden_dims'], output_dim,\n config['num_heads'], config['dropout'], device)\n model.to(device)\n\n if not config['load']:\n criterion = utils.get_criterion(config['task'])\n optimizer = optim.Adam(model.parameters(), lr=config['lr'],\n weight_decay=config['weight_decay'])\n epochs = config['epochs']\n stats_per_batch = config['stats_per_batch']\n num_batches = int(ceil(len(dataset) / config['batch_size']))\n model.train()\n print('--------------------------------')\n print('Training.')\n for epoch in range(epochs):\n print('Epoch {} / {}'.format(epoch+1, epochs))\n running_loss = 0.0\n num_correct, num_examples = 0, 0\n for (idx, batch) in enumerate(loader):\n features, node_layers, mappings, rows, labels = batch\n features, labels = features.to(device), labels.to(device)\n optimizer.zero_grad()\n out = model(features, node_layers, mappings, rows)\n loss = criterion(out, labels)\n loss.backward()\n optimizer.step()\n with torch.no_grad():\n running_loss += loss.item()\n predictions = torch.max(out, dim=1)[1]\n num_correct += torch.sum(predictions == labels).item()\n num_examples += len(labels)\n if (idx + 1) % stats_per_batch == 0:\n running_loss /= stats_per_batch\n accuracy = num_correct / num_examples\n print(' Batch {} / {}: loss {}, accuracy {}'.format(\n idx+1, num_batches, running_loss, accuracy))\n running_loss = 0.0\n num_correct, num_examples = 0, 0\n print('Finished training.')\n print('--------------------------------')\n\n if config['save']:\n print('--------------------------------')\n directory = os.path.join(os.path.dirname(os.getcwd()),\n 'trained_models')\n if not os.path.exists(directory):\n os.makedirs(directory)\n fname = utils.get_fname(config)\n path = os.path.join(directory, fname)\n print('Saving model at {}'.format(path))\n torch.save(model.state_dict(), path)\n print('Finished saving model.')\n print('--------------------------------')\n\n if config['load']:\n directory = os.path.join(os.path.dirname(os.getcwd()),\n 'trained_models')\n fname = utils.get_fname(config)\n path = os.path.join(directory, fname)\n model.load_state_dict(torch.load(path))\n dataset_args = (config['task'], config['dataset'], config['dataset_path'],\n 'test', config['num_layers'], config['self_loop'],\n config['normalize_adj'], config['transductive'])\n dataset = utils.get_dataset(dataset_args)\n loader = DataLoader(dataset=dataset, batch_size=config['batch_size'],\n shuffle=False, collate_fn=dataset.collate_wrapper)\n criterion = utils.get_criterion(config['task'])\n stats_per_batch = config['stats_per_batch']\n num_batches = int(ceil(len(dataset) / config['batch_size']))\n model.eval()\n print('--------------------------------')\n print('Testing.')\n running_loss, total_loss = 0.0, 0.0\n 
num_correct, num_examples = 0, 0\n total_correct, total_examples = 0, 0\n for (idx, batch) in enumerate(loader):\n features, node_layers, mappings, rows, labels = batch\n features, labels = features.to(device), labels.to(device)\n out = model(features, node_layers, mappings, rows)\n loss = criterion(out, labels)\n running_loss += loss.item()\n total_loss += loss.item()\n predictions = torch.max(out, dim=1)[1]\n num_correct += torch.sum(predictions == labels).item()\n total_correct += torch.sum(predictions == labels).item()\n num_examples += len(labels)\n total_examples += len(labels)\n if (idx + 1) % stats_per_batch == 0:\n running_loss /= stats_per_batch\n accuracy = num_correct / num_examples\n print(' Batch {} / {}: loss {}, accuracy {}'.format(\n idx+1, num_batches, running_loss, accuracy))\n running_loss = 0.0\n num_correct, num_examples = 0, 0\n total_loss /= num_batches\n total_accuracy = total_correct / total_examples\n print('Loss {}, accuracy {}'.format(total_loss, total_accuracy))\n print('Finished testing.')\n print('--------------------------------')\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.max",
"torch.load",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
sinhars/Data-Structures-And-Algorithms
|
[
"92f42ac347a69260a28cfb37d3013bfe9b045448"
] |
[
"Course1/Week2/1_fibonacci.py"
] |
[
"# Uses python3\n\nimport sys\nimport numpy as np\n\ndef calc_fib(n):\n if (n <= 1):\n return n\n return calc_fib(n - 1) + calc_fib(n - 2)\n\ndef calc_fib_by_sum(n):\n fib_arr = np.zeros(n + 1, dtype=np.longlong)\n for i in range(n + 1):\n fib_arr[i] = i if (i <= 1) else (fib_arr[i - 1] + fib_arr[i - 2])\n fin_n = fib_arr[n]\n return (fin_n)\n\nif __name__ == '__main__':\n input = sys.stdin.readline()\n n = int(input)\n print(calc_fib_by_sum(n))\n"
] |
[
[
"numpy.zeros"
]
] |
dtish6/CPPN_Runway
|
[
"814c94cf1a5aae9e19f4ea81ad0ae51a057fe38f"
] |
[
"example_model.py"
] |
[
"# MIT License\n\n# Copyright (c) 2019 Runway AI, Inc\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport random\nfrom PIL import Image\nimport tensorflow as tf\nimport numpy as np\n\n\nclass CPPNModel:\n\n def __init__(self, options):\n self.seed = options['seed']\n self.mode = options['mode']\n self.res = options['resolution']\n\n def init_model_tanh(self):\n model = tf.keras.Sequential()\n init = tf.keras.initializers.VarianceScaling(scale=10, mode='fan_in', seed=self.seed)\n model.add(tf.keras.layers.Dense(32, activation=\"tanh\", input_shape=(5,), kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(32, activation=\"tanh\", kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(32, activation=\"tanh\", kernel_initializer=init, use_bias=False))\n # model.add(layers.Dense(32, activation=\"tanh\", kernel_initializer=init,use_bias=False))\n model.add(tf.keras.layers.Dense(1, activation=\"sigmoid\", kernel_initializer=init, use_bias=False))\n return model\n\n def init_model_softplus(self):\n model = tf.keras.Sequential()\n init = tf.keras.initializers.VarianceScaling(scale=10, mode='fan_in', seed=self.seed)\n model.add(tf.keras.layers.Dense(32, activation=\"tanh\", input_shape=(5,), kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(32, activation=\"softplus\", kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(32, activation=\"tanh\", kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(32, activation=\"softplus\", kernel_initializer=init, use_bias=False))\n model.add(tf.keras.layers.Dense(1, activation=\"sigmoid\", kernel_initializer=init, use_bias=False))\n return model\n\n def pixelGrid(self, resolution=64):\n x = np.linspace(-1, 1, resolution)\n X, Y = np.meshgrid(x, x)\n return np.vstack([X.flatten(), Y.flatten()]).T\n\n def generateIm(self, model, z=[0, 0], resolution=64, scale=1.0):\n pixels = self.pixelGrid(resolution) * scale\n input = np.hstack(\n [pixels, np.linalg.norm(pixels, axis=1).reshape(-1, 1), np.repeat([z], resolution ** 2, axis=0) * scale])\n return model.predict(input, batch_size=128).reshape(resolution, resolution)\n\n\n # Generate an image based on input vector.\n def run_on_input(self, input_vec):\n\n # This is an example of how you could use some input from\n # @runway.setup(), like options['truncation'], later inside a\n # function called by @runway.command().\n # text = caption_text[0:self.truncation]\n\n if self.mode == 'tanh':\n 
model = self.init_model_tanh()\n elif self.mode == 'softplus':\n model = self.init_model_softplus()\n else:\n print(\"Provide either 'tanh' or 'softplus' as the mode\")\n return None\n\n z1,z2,scale = input_vec\n RGB = np.zeros((self.res, self.res, 3), dtype=float)\n\n im1 = self.generateIm(model, z=[z1, z2], scale=scale, resolution=self.res)\n RGB[..., 0] = im1\n RGB[..., 1] = im1\n RGB[..., 2] = im1\n\n return Image.fromarray(np.uint8(RGB*255))\n\n # Return a red image if the input text is \"red\",\n # otherwise return a blue image.\n # if text == 'red':\n # return Image.new('RGB', (512, 512), color = 'red')\n # else:\n # return Image.new('RGB', (512, 512), color = 'blue')\n"
] |
[
[
"numpy.linspace",
"tensorflow.keras.layers.Dense",
"numpy.uint8",
"tensorflow.keras.Sequential",
"numpy.linalg.norm",
"tensorflow.keras.initializers.VarianceScaling",
"numpy.repeat",
"numpy.meshgrid",
"numpy.zeros"
]
] |
vietdelta/TopicModelling
|
[
"3c5ba81bcfe3d7fcb418f21532e82413fc5474ad"
] |
[
"lda_simple.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport lda\nimport numpy as np\nimport pickle\ndata_folder = \"/home/vietphan/Downloads/fbcrawl/Data-Celeb-Nov/\"\nfilename = data_folder+\"model/\"+\"done_processing.txt\"\ncorpus = []\npage_id = []\ndoc_count = 0\nwith open(filename, 'r') as f:\n line = f.readline()\n while(line):\n if(line[0]==\"*\"):\n page_id.append(doc_count)\n else:\n corpus.append(line)\n doc_count = doc_count +1\n line = f.readline()\n# print(corpus)\nvectorizer = CountVectorizer()\nX = vectorizer.fit_transform(corpus)\nvocab = vectorizer.get_feature_names()\n# print(vectorizer.get_feature_names())\n\nmodel = lda.LDA(n_topics=20, n_iter=2000, random_state=1)\nmodel.fit(X)\nprint(\"Saving model....\")\nmodel_filename = data_folder+\"model/\"+'finalized_model.sav'\npickle.dump(model, open(model_filename, 'wb'))\nprint(\"Model saved\")\n\n\n"
] |
[
[
"sklearn.feature_extraction.text.CountVectorizer"
]
] |
ram-ki/SMS-organizer
|
[
"0ee28b07b736cdef84eb58892215b71a43303eda"
] |
[
"prune.py"
] |
[
    "import pandas as pd\nimport numpy as np\nimport glob\n\nimport data.normalise as nm\nfrom data.duplicates import group_duplicates\nfrom data.features import compute_features\n\nnw_features_disc = {\n\t'Time': {\n\t\t'func': nm.change_time,\n\t\t'input': 'time'\n\t},\n\t'Date': {\n\t\t'func': nm.has_dates,\n\t\t'input': 'message'\n\t},\n\t'Number': {\n\t\t'func': nm.has_numbers,\n\t\t'input': 'message'\n\t},\n\t'Decimal': {\n\t\t'func': nm.has_decimals,\n\t\t'input': 'message'\n\t},\n\t'URL': {\n\t\t'func': nm.has_urls,\n\t\t'input': 'message'\n\t}\n}\n\n\nif __name__ == '__main__':\n\tall_files = glob.glob('data/raw_db' + '/*.csv')\n\n\tli = []\n\tfor filename in all_files:\n\t\tdf = pd.read_csv(filename)\n\t\tdf.columns = ['sender', 'time', 3, 'message']\n\t\tli.append(df)\n\n\tdata = pd.concat(li, axis=0)\n\tsms = data.iloc[:, [0, 1, 3]]\n\tsms = sms.drop(sms.columns[0], axis=1) # doubt\n\n\tsms['message'] = sms['message'].apply(nm.remove_newlines)\n\n\thuman = pd.read_csv('data/pruned_db/old_human.csv')\n\thuman.columns = ['index', 'sender', 'message', 'label']\n\thuman = pd.concat([human, sms]).reset_index(drop=True)\n\n\tnw_features = pd.DataFrame(index=range(len(human)), columns=[feat for feat in nw_features_disc])\n\n\tfor feature in nw_features_disc:\n\t\tdisc = nw_features_disc[feature]\n\t\tnw_features[feature] = human[disc['input']].astype(str).apply(disc['func'])\n\n\tnw_features = nw_features.to_numpy()\n\n\twords, w_features = compute_features(human['message'].to_numpy().astype(str), compute=True)\n\tnumber_words = nm.number_words(w_features)\n\tnw_features = np.append(nw_features, number_words, axis=1)\n\n\tdata = np.append(nw_features, w_features, axis=1)\n\n\thuman.to_csv(\"data/pruned_db/human.csv\")\n\n\tnp.savetxt(\"data/pruned_db/words.csv\", words, fmt='%s', encoding='utf8')\n\tnp.savetxt(\"data/pruned_db/unlabeled.csv\", data, delimiter=\",\")\n\n\tgroup_duplicates(w_features)\n"
] |
[
[
"numpy.savetxt",
"pandas.concat",
"pandas.read_csv",
"numpy.append"
]
] |
yashk2000/ai-economist
|
[
"b9424f2283023748399b9b56999d213164d2f258"
] |
[
"Experiment 2/training_scripts/aie/plotting.py"
] |
[
"# Copyright (c) 2020, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root\n# or https://opensource.org/licenses/BSD-3-Clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ai_economist.foundation import landmarks, resources\n\n\ndef plot_map(maps, locs, ax=None, cmap_order=None):\n world_size = np.array(maps.get(\"Wood\")).shape\n max_health = {\"Wood\": 1, \"Stone\": 1, \"House\": 1}\n n_agents = len(locs)\n\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(10, 10))\n else:\n ax.cla()\n tmp = np.zeros((3, world_size[0], world_size[1]))\n cmap = plt.get_cmap(\"jet\", n_agents)\n\n if cmap_order is None:\n cmap_order = list(range(n_agents))\n else:\n cmap_order = list(cmap_order)\n assert len(cmap_order) == n_agents\n\n scenario_entities = [k for k in maps.keys() if \"source\" not in k.lower()]\n for entity in scenario_entities:\n if entity == \"House\":\n continue\n elif resources.has(entity):\n if resources.get(entity).collectible:\n map_ = (\n resources.get(entity).color[:, None, None]\n * np.array(maps.get(entity))[None]\n )\n map_ /= max_health[entity]\n tmp += map_\n elif landmarks.has(entity):\n map_ = (\n landmarks.get(entity).color[:, None, None]\n * np.array(maps.get(entity))[None]\n )\n tmp += map_\n else:\n continue\n\n if isinstance(maps, dict):\n house_idx = np.array(maps.get(\"House\")[\"owner\"])\n house_health = np.array(maps.get(\"House\")[\"health\"])\n else:\n house_idx = maps.get(\"House\", owner=True)\n house_health = maps.get(\"House\")\n for i in range(n_agents):\n houses = house_health * (house_idx == cmap_order[i])\n agent = np.zeros_like(houses)\n agent += houses\n col = np.array(cmap(i)[:3])\n map_ = col[:, None, None] * agent[None]\n tmp += map_\n\n tmp *= 0.7\n tmp += 0.3\n\n tmp = np.transpose(tmp, [1, 2, 0])\n tmp = np.minimum(tmp, 1.0)\n\n ax.imshow(tmp, vmax=1.0, aspect=\"auto\")\n\n bbox = ax.get_window_extent()\n\n for i in range(n_agents):\n r, c = locs[cmap_order[i]]\n col = np.array(cmap(i)[:3])\n ax.plot(c, r, \"o\", markersize=bbox.height * 20 / 550, color=\"w\")\n ax.plot(c, r, \"*\", markersize=bbox.height * 15 / 550, color=col)\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef plot_env_state(env, ax=None, remap_key=None):\n maps = env.world.maps\n locs = [agent.loc for agent in env.world.agents]\n\n if remap_key is None:\n cmap_order = None\n else:\n assert isinstance(remap_key, str)\n cmap_order = np.argsort(\n [agent.state[remap_key] for agent in env.world.agents]\n ).tolist()\n\n plot_map(maps, locs, ax, cmap_order)\n\n\ndef plot_log_state(dense_log, t, ax=None, remap_key=None):\n maps = dense_log[\"world\"][t]\n states = dense_log[\"states\"][t]\n\n n_agents = len(states) - 1\n locs = []\n for i in range(n_agents):\n r, c = states[str(i)][\"loc\"]\n locs.append([r, c])\n\n if remap_key is None:\n cmap_order = None\n else:\n assert isinstance(remap_key, str)\n key_val = np.array(\n [dense_log[\"states\"][0][str(i)][remap_key] for i in range(n_agents)]\n )\n cmap_order = np.argsort(key_val).tolist()\n\n plot_map(maps, locs, ax, cmap_order)\n\n\ndef _format_logs_and_eps(dense_logs, eps):\n if isinstance(dense_logs, dict):\n return [dense_logs], [0]\n else:\n assert isinstance(dense_logs, (list, tuple))\n\n if isinstance(eps, (list, tuple)):\n return dense_logs, list(eps)\n elif isinstance(eps, (int, float)):\n return dense_logs, [int(eps)]\n elif eps is None:\n return dense_logs, list(range(np.minimum(len(dense_logs), 16)))\n 
else:\n raise NotImplementedError\n\n\ndef vis_world_array(dense_logs, ts, eps=None, axes=None, remap_key=None):\n dense_logs, eps = _format_logs_and_eps(dense_logs, eps)\n if isinstance(ts, (int, float)):\n ts = [ts]\n\n if axes is None:\n fig, axes = plt.subplots(\n len(eps),\n len(ts),\n figsize=(np.minimum(3.2 * len(ts), 16), 3 * len(eps)),\n squeeze=False,\n )\n\n else:\n fig = None\n\n if len(ts) == 1 and len(eps) == 1:\n axes = np.array([[axes]]).reshape(1, 1)\n else:\n try:\n axes = np.array(axes).reshape(len(eps), len(ts))\n except ValueError:\n print(\"Could not reshape provided axes array into the necessary shape!\")\n raise\n\n for ti, t in enumerate(ts):\n for ei, ep in enumerate(eps):\n plot_log_state(dense_logs[ep], t, ax=axes[ei, ti], remap_key=remap_key)\n\n for ax, t in zip(axes[0], ts):\n ax.set_title(\"T = {}\".format(t))\n for ax, ep in zip(axes[:, 0], eps):\n ax.set_ylabel(\"Episode {}\".format(ep))\n\n return fig\n\n\ndef vis_world_range(\n dense_logs, t0=0, tN=None, N=5, eps=None, axes=None, remap_key=None\n):\n dense_logs, eps = _format_logs_and_eps(dense_logs, eps)\n\n viable_ts = np.array([i for i, w in enumerate(dense_logs[0][\"world\"]) if w])\n if tN is None:\n tN = viable_ts[-1]\n assert 0 <= t0 < tN\n target_ts = np.linspace(t0, tN, N).astype(np.int)\n\n ts = set()\n for tt in target_ts:\n closest = np.argmin(np.abs(tt - viable_ts))\n ts.add(viable_ts[closest])\n ts = sorted(list(ts))\n if axes is not None:\n axes = axes[: len(ts)]\n return vis_world_array(dense_logs, ts, axes=axes, eps=eps, remap_key=remap_key)\n\n\ndef vis_builds(dense_logs, eps=None, ax=None):\n dense_logs, eps = _format_logs_and_eps(dense_logs, eps)\n\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(16, 3))\n cmap = plt.get_cmap(\"jet\", len(eps))\n for i, ep in enumerate(eps):\n ax.plot(\n np.cumsum([len(b[\"builds\"]) for b in dense_logs[ep][\"Build\"]]),\n color=cmap(i),\n label=\"Ep {}\".format(ep),\n )\n ax.legend()\n ax.grid(b=True)\n ax.set_ylim(bottom=0)\n\n\ndef trade_str(c_trades, resource, agent, income=True):\n if income:\n p = [x[\"income\"] for x in c_trades[resource] if x[\"seller\"] == agent]\n else:\n p = [x[\"cost\"] for x in c_trades[resource] if x[\"buyer\"] == agent]\n if len(p) > 0:\n return \"{:6.2f} (n={:3d})\".format(np.mean(p), len(p))\n else:\n tmp = \"~\" * 8\n tmp = (\" \") * 3 + tmp + (\" \") * 3\n return tmp\n\n\ndef full_trade_str(c_trades, resource, a_indices, income=True):\n s_head = \"{} ({})\".format(\"Income\" if income else \"Cost\", resource)\n ac_strings = [trade_str(c_trades, resource, buyer, income) for buyer in a_indices]\n s_tail = \" | \".join(ac_strings)\n return \"{:<15}: {}\".format(s_head, s_tail)\n\n\ndef build_str(all_builds, agent):\n p = [x[\"income\"] for x in all_builds if x[\"builder\"] == agent]\n if len(p) > 0:\n return \"{:6.2f} (n={:3d})\".format(np.mean(p), len(p))\n else:\n tmp = \"~\" * 8\n tmp = (\" \") * 3 + tmp + (\" \") * 3\n return tmp\n\n\ndef full_build_str(all_builds, a_indices):\n s_head = \"Income (Build)\"\n ac_strings = [build_str(all_builds, builder) for builder in a_indices]\n s_tail = \" | \".join(ac_strings)\n return \"{:<15}: {}\".format(s_head, s_tail)\n\n\ndef header_str(n_agents):\n s_head = (\"_\" * 15) + \":_\"\n s_tail = \"_|_\".join([\" Agent {:2d} ____\".format(i) for i in range(n_agents)])\n return s_head + s_tail\n\n\ndef report(c_trades, all_builds, n_agents, a_indices=None):\n if a_indices is None:\n a_indices = list(range(n_agents))\n print(header_str(n_agents))\n resources = [\"Wood\", 
\"Stone\"]\n if c_trades is not None:\n for resource in resources:\n print(full_trade_str(c_trades, resource, a_indices, income=False))\n print(\"\")\n for resource in resources:\n print(full_trade_str(c_trades, resource, a_indices, income=True))\n print(full_build_str(all_builds, a_indices))\n\n\ndef breakdown(log, remap_key=None):\n fig0 = vis_world_range(log, remap_key=remap_key)\n\n n = len(list(log[\"states\"][0].keys())) - 1\n trading_active = \"Trade\" in log\n\n if remap_key is None:\n aidx = list(range(n))\n else:\n assert isinstance(remap_key, str)\n key_vals = np.array([log[\"states\"][0][str(i)][remap_key] for i in range(n)])\n aidx = np.argsort(key_vals).tolist()\n\n all_builds = []\n for t, builds in enumerate(log[\"Build\"]):\n if isinstance(builds, dict):\n builds_ = builds[\"builds\"]\n else:\n builds_ = builds\n for build in builds_:\n this_build = {\"t\": t}\n this_build.update(build)\n all_builds.append(this_build)\n\n if trading_active:\n c_trades = {\"Stone\": [], \"Wood\": []}\n for t, trades in enumerate(log[\"Trade\"]):\n if isinstance(trades, dict):\n trades_ = trades[\"trades\"]\n else:\n trades_ = trades\n for trade in trades_:\n this_trade = {\n \"t\": t,\n \"t_ask\": t - trade[\"ask_lifetime\"],\n \"t_bid\": t - trade[\"bid_lifetime\"],\n }\n this_trade.update(trade)\n c_trades[trade[\"commodity\"]].append(this_trade)\n\n incomes = {\n \"Sell Stone\": [\n sum([t[\"income\"] for t in c_trades[\"Stone\"] if t[\"seller\"] == aidx[i]])\n for i in range(n)\n ],\n \"Buy Stone\": [\n sum([-t[\"price\"] for t in c_trades[\"Stone\"] if t[\"buyer\"] == aidx[i]])\n for i in range(n)\n ],\n \"Sell Wood\": [\n sum([t[\"income\"] for t in c_trades[\"Wood\"] if t[\"seller\"] == aidx[i]])\n for i in range(n)\n ],\n \"Buy Wood\": [\n sum([-t[\"price\"] for t in c_trades[\"Wood\"] if t[\"buyer\"] == aidx[i]])\n for i in range(n)\n ],\n \"Build\": [\n sum([b[\"income\"] for b in all_builds if b[\"builder\"] == aidx[i]])\n for i in range(n)\n ],\n }\n\n else:\n c_trades = None\n incomes = {\n \"Build\": [\n sum([b[\"income\"] for b in all_builds if b[\"builder\"] == aidx[i]])\n for i in range(n)\n ],\n }\n\n incomes[\"Total\"] = np.stack([v for v in incomes.values()]).sum(axis=0)\n\n endows = [\n int(\n log[\"states\"][-1][str(aidx[i])][\"inventory\"][\"Coin\"]\n + log[\"states\"][-1][str(aidx[i])][\"escrow\"][\"Coin\"]\n )\n for i in range(n)\n ]\n\n n_small = np.minimum(4, n)\n\n report(c_trades, all_builds, n, aidx)\n\n cmap = plt.get_cmap(\"jet\", n)\n rs = [\"Wood\", \"Stone\", \"Coin\"]\n\n fig1, axes = plt.subplots(1, len(rs) + 1, figsize=(16, 4), sharey=False)\n for r, ax in zip(rs, axes):\n for i in range(n):\n ax.plot(\n [\n x[str(aidx[i])][\"inventory\"][r] + x[str(aidx[i])][\"escrow\"][r]\n for x in log[\"states\"]\n ],\n label=i,\n color=cmap(i),\n )\n ax.set_title(r)\n ax.legend()\n ax.grid(b=True)\n\n ax = axes[-1]\n for i in range(n):\n ax.plot(\n [x[str(aidx[i])][\"endogenous\"][\"Labor\"] for x in log[\"states\"]],\n label=i,\n color=cmap(i),\n )\n ax.set_title(\"Labor\")\n ax.legend()\n ax.grid(b=True)\n\n tmp = np.array(log[\"world\"][0][\"Stone\"])\n fig2, axes = plt.subplots(\n 2 if trading_active else 1,\n n_small,\n figsize=(16, 8 if trading_active else 4),\n sharex=\"row\",\n sharey=\"row\",\n squeeze=False,\n )\n for i, ax in enumerate(axes[0]):\n rows = np.array([x[str(aidx[i])][\"loc\"][0] for x in log[\"states\"]]) * -1\n cols = np.array([x[str(aidx[i])][\"loc\"][1] for x in log[\"states\"]])\n ax.plot(cols[::20], rows[::20])\n ax.plot(cols[0], 
rows[0], \"r*\", markersize=15)\n ax.plot(cols[-1], rows[-1], \"g*\", markersize=15)\n ax.set_title(\"Agent {}\".format(i))\n ax.set_xlim([-1, 1 + tmp.shape[1]])\n ax.set_ylim([-(1 + tmp.shape[0]), 1])\n\n if trading_active:\n for i, ax in enumerate(axes[1]):\n for r in [\"Wood\", \"Stone\"]:\n tmp = [\n (s[\"t\"], s[\"income\"]) for s in c_trades[r] if s[\"seller\"] == aidx[i]\n ]\n if tmp:\n ts, prices = [np.array(x) for x in zip(*tmp)]\n ax.plot(\n np.stack([ts, ts]),\n np.stack([np.zeros_like(prices), prices]),\n color=resources.get(r).color,\n )\n ax.plot(\n ts, prices, \".\", color=resources.get(r).color, markersize=12\n )\n\n tmp = [\n (s[\"t\"], -s[\"cost\"]) for s in c_trades[r] if s[\"buyer\"] == aidx[i]\n ]\n if tmp:\n ts, prices = [np.array(x) for x in zip(*tmp)]\n ax.plot(\n np.stack([ts, ts]),\n np.stack([np.zeros_like(prices), prices]),\n color=resources.get(r).color,\n )\n ax.plot(\n ts, prices, \".\", color=resources.get(r).color, markersize=12\n )\n ax.plot([-20, len(log[\"states\"]) + 19], [0, 0], \"w-\")\n # ax.set_ylim([-10.2, 10.2]);\n ax.set_xlim([-20, len(log[\"states\"]) + 19])\n ax.grid(b=True)\n ax.set_facecolor([0.3, 0.3, 0.3])\n\n return (fig0, fig1, fig2), incomes, endows, c_trades, all_builds\n\n\ndef plot_for_each_n(y_fun, n, ax=None):\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(5, 5))\n cmap = plt.get_cmap(\"jet\", n)\n for i in range(n):\n ax.plot(y_fun(i), color=cmap(i), label=i)\n ax.legend()\n ax.grid(b=True)\n"
] |
[
[
"numpy.minimum",
"numpy.abs",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.zeros_like",
"numpy.mean",
"numpy.transpose",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
]
] |
wrongu/modularity
|
[
"9186744916aa4b2ef808e6ee542b18e73ba5c9e1"
] |
[
"generate_dummy_checkpoints.py"
] |
[
"import torch\nfrom models import LitWrapper\nfrom pathlib import Path\nimport argparse\n\n\ndef create_dummy_checkpoint(dataset, task, uid, save_dir=Path()):\n mdl = LitWrapper(dataset=dataset, task=task, l2=0., l1=0., drop=0., run=uid)\n\n the_path = save_dir / mdl.get_uid()\n the_path.mkdir(exist_ok=True)\n the_file = the_path / 'dummy.ckpt'\n if not the_file.exists():\n mdl.init_model(set_seed=True)\n dummy_data = {\n LitWrapper.CHECKPOINT_HYPER_PARAMS_KEY: {\n 'dataset': dataset,\n 'task': task,\n 'l2': 0.0,\n 'l1': 0.0,\n 'drop': 0.0,\n 'run': uid\n },\n 'state_dict': mdl.state_dict()\n }\n torch.save(dummy_data, the_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', required=True)\n parser.add_argument('--task', required=True)\n parser.add_argument('--num', default=500, type=int)\n parser.add_argument('--save-dir', default=Path(), type=Path)\n args = parser.parse_args()\n\n for i in range(args.num):\n create_dummy_checkpoint(dataset=args.dataset,\n task=args.task,\n save_dir=args.save_dir,\n uid=1000+i)\n"
] |
[
[
"torch.save"
]
] |
Toraudonn/RecordWithRS
|
[
"4793416b1b3fd3ec5339bd4b070f980a401f11fd"
] |
[
"python/rendering.py"
] |
[
"import chainer\nimport open3d as o3\nimport numpy as np\nimport csv\nimport cv2\nfrom pprint import pprint \n\nimport argparse\nimport sys\nimport os\nimport time\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nabs_op_lib = os.path.join(dir_path, 'openpose')\nfrom openpose import params, JointType\n\n\nclass Joint:\n\n def __init__(self, P, index, coord):\n '''\n Init with\n P: 4x4 Transform Matrix\n index: joint index\n coord: joint's camera coordinate\n '''\n self.point = self.convert2world(P, coord)\n self.index = index\n self.name = JointType(index).name\n\n def convert2world(self, P, coord):\n '''\n Convert from Camera coordniate to World coordinate (according to P)\n '''\n _coord = np.concatenate([np.asarray(coord), [1.000]])\n _P = np.array(P)\n #FIXME: Remove this when P is fixed\n rotate = np.array([[1,0,0,0], [0,-1,0,0],[0,0,-1,0],[0,0,0,1]])\n n = rotate.dot(_coord)\n return _P.dot(n)[:3]\n\n\nclass Joints:\n\n def __init__(self, P, raw_jnts):\n assert len(raw_jnts) == 18, \"Not enough points to make Joints.\"\n self.P = P\n self.joints = {}\n\n for i, jointType in enumerate(JointType):\n if (raw_jnts[i] == [0, 0, 0]).all():\n # create a zero vector for place holder\n self.joints[jointType.name] = np.zeros(3)\n else:\n joint = Joint(P, i, raw_jnts[i])\n self.joints[jointType.name] = joint.point\n\n def get_array(self):\n arr = []\n for k, v in self.joints.items():\n if np.all(v != 0):\n arr.append(v)\n return np.asarray(arr)\n\n def get_array_of_joint(self, joint):\n if np.all(self.joints[joint] != 0):\n return np.asarray(self.joints[joint])\n\n def to_pointcloud(self):\n pc = o3.PointCloud()\n pc.points = o3.Vector3dVector(np.array(self.get_array()))\n return pc\n\n def normalize(self, v):\n '''\n Normalize a vector\n '''\n norm = np.linalg.norm(v)\n if norm == 0: \n return v\n return v / norm\n\n def get_rotation(self, a, b):\n '''\n Calculate rotation matrix from two 3D unit vectors\n - maps vector 'a' onto vector 'b'\n\n FIXME: when the two vectors are parallel\n '''\n # https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n \n V = np.cross(a, b)\n s = np.linalg.norm(V)\n c = np.dot(a, b)\n I = np.array([[1, 0, 0], \n [0, 1, 0], \n [0, 0, 1]\n ])\n Vx = np.array([[0, -V[2], V[1]], \n [V[2], 0, -V[0]], \n [-V[1], V[0], 0]\n ])\n R = I + Vx + np.matmul(Vx, Vx) * (1 / (1 + c))\n return R\n \n def create_skeleton_geometry(self):\n '''\n Create human skeleton geometry\n '''\n geometries = []\n \n joint_colors = [\n [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],\n [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],\n [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [255, 0, 255], [255, 0, 170], [255, 0, 85]\n ]\n\n limb_colors = [\n [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255],\n [0, 85, 255], [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0.],\n [255, 0, 85], [170, 255, 0], [85, 255, 0], [170, 0, 255.], [0, 0, 255],\n [0, 0, 255], [255, 0, 255], [170, 0, 255], [255, 0, 170],\n ]\n\n for i, (jointType, color) in enumerate(zip(JointType, joint_colors)):\n if np.all(self.joints[jointType.name] != 0):\n sphere = o3.create_mesh_sphere(radius = 10.0)\n pos = np.concatenate([np.asarray(self.joints[jointType.name]), [1]])\n # create translation matrix\n Tm = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], pos]).T\n # move sphere\n sphere.transform(Tm)\n # paint sphere\n sphere.paint_uniform_color([v / 255 for v in color])\n\n 
geometries.append(sphere)\n\n for i, (limb, color) in enumerate(zip(params['limbs_point'], limb_colors)):\n if i != 9 and i != 13: # don't show ear-shoulder connection\n l1 = limb[0].name\n l2 = limb[1].name\n pl1 = self.joints[l1]\n pl2 = self.joints[l2]\n\n if np.any(pl1) and np.any(pl2):\n dist = np.linalg.norm(pl1 - pl2)\n midpoint = np.concatenate([(pl1 + pl2) / 2, [1]])\n\n # orientation of cylindar (z axis)\n vec_cylindar = np.array([0, 0, 1])\n # normalized vector of the two points connected\n norm = self.normalize(pl2 - pl1)\n \n # get rotation matrix\n R = self.get_rotation(vec_cylindar, norm).T\n \n # create translation matrix\n tm1 = np.concatenate([R[0], [0]])\n tm2 = np.concatenate([R[1], [0]])\n tm3 = np.concatenate([R[2], [0]])\n Tm = np.array([tm1, tm2, tm3, midpoint]).T\n\n # create the cylinder\n cylinder = o3.create_mesh_cylinder(radius = 5.0, height = dist)\n # move the cylinder\n cylinder.transform(Tm)\n # paint the cylinder\n cylinder.paint_uniform_color([v / 255 for v in color])\n \n geometries.append(cylinder)\n \n return geometries\n\nimport time\n\nclass CustomVisualizer:\n\n def __init__(self, base):\n self.base = base\n \n def intialize_visualizer(self):\n '''\n Function to add geometry (cannot destroy)\n '''\n \n h, w = self._get_window_size()\n self.vis = o3.Visualizer()\n self.vis.create_window('pose', width=int(w), height=int(h), left=50, right=50)\n self.vis.add_geometry(self.base)\n\n self.render_option = self.vis.get_render_option().load_from_json(\n \"static_data/renderoption.json\")\n \n self.trajectory = o3.read_pinhole_camera_trajectory(\"static_data/pinholeCameraTrajectory.json\")\n self.custom_view()\n self.vis.update_renderer()\n self.vis.run()\n \n\n def _get_window_size(self):\n intrinsics = o3.read_pinhole_camera_intrinsic(\"static_data/pinholeCameraIntrinsic.json\")\n h = intrinsics.height\n w = intrinsics.width\n return h, w\n\n\n def update_geometry(self, pcd):\n \n for p in pcd:\n self.vis.add_geometry(p)\n \n self.vis.update_geometry()\n \n self.vis.reset_view_point(False)\n self.custom_view()\n self.vis.poll_events()\n self.vis.update_renderer()\n\n for p in pcd:\n p.clear()\n \n def custom_view(self):\n ctr = self.vis.get_view_control()\n intrinsic = self.trajectory.intrinsic\n extrinsic = self.trajectory.extrinsic\n ctr.convert_from_pinhole_camera_parameters(intrinsic, np.asarray(extrinsic)[0])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Pose Getter')\n parser.add_argument('--data', default= '/mnt/extHDD/save_data/20180909_1316/',help='relative data path from where you use this program')\n parser.add_argument('--static', default='static_data', help='static data location')\n args = parser.parse_args()\n\n # get directory of data (rgb, depth)\n data_path = os.path.join(dir_path, args.data)\n static_path = os.path.join(args.static)\n assert os.path.exists(data_path), \"Could not find data directory in the path: {}\".format(data_path)\n assert os.path.exists(static_path), \"Could not find static data directory in the path: {}\".format(static_path)\n print('Getting data from: {}'.format(data_path))\n\n # Translation matrix\n P_matrix_filename = os.path.join(static_path, 'T.csv')\n P = np.loadtxt(P_matrix_filename, delimiter=',')\n\n # Load room\n room_ply = os.path.join(static_path, 'room_A.ply')\n pc_room = o3.read_point_cloud(room_ply)\n\n # pose path\n pose_path = os.path.join(data_path, 'pose')\n\n # initialize visualizer\n vis = CustomVisualizer(pc_room)\n vis.intialize_visualizer()\n\n files = 
os.listdir(pose_path)\n filenames = sorted(files, key=lambda f: int(''.join(filter(str.isdigit, f))))\n\n for fn in filenames: #FIXME: didn't sort by number, but name\n if fn.endswith('.csv'):\n print(fn)\n # get joints data and turn it into numpy array\n csv_path = os.path.join(pose_path, fn)\n raw_joints = np.loadtxt(csv_path, delimiter=',')\n \n joints = Joints(P, raw_joints)\n # get skeleton geometries\n \n pc_joints = joints.create_skeleton_geometry()\n\n vis.update_geometry(pc_joints)\n \n time.sleep(0.025)\n"
] |
[
[
"numpy.dot",
"numpy.asarray",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.all",
"numpy.concatenate",
"numpy.any",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] |
apexrl/CoDAIL
|
[
"d6996698155677b51f5b844d848bf2bdce0f8a5f"
] |
[
"multi-agent-particle-envs/multiagent/scenarios/simple_tag.py"
] |
[
"import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_good_agents = 1\n num_adversaries = 3\n num_agents = num_adversaries + num_good_agents\n num_landmarks = 2\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n agent.adversary = True if i < num_adversaries else False\n agent.size = 0.075 if agent.adversary else 0.05\n agent.accel = 3.0 if agent.adversary else 4.0\n #agent.accel = 20.0 if agent.adversary else 25.0\n agent.max_speed = 1.0 if agent.adversary else 1.3\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = True\n landmark.movable = False\n landmark.size = 0.2\n landmark.boundary = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.35, 0.85, 0.35]) if not agent.adversary else np.array([0.85, 0.35, 0.35])\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n if not landmark.boundary:\n landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n # returns data for benchmarking purposes\n if agent.adversary:\n collisions = 0\n for a in self.good_agents(world):\n if self.is_collision(a, agent):\n collisions += 1\n return collisions\n else:\n return 0\n\n def is_collision(self, agent1, agent2):\n delta_pos = agent1.state.p_pos - agent2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = agent1.size + agent2.size\n return True if dist < dist_min else False\n\n # return all agents that are not adversaries\n def good_agents(self, world):\n return [agent for agent in world.agents if not agent.adversary]\n\n # return all adversarial agents\n def adversaries(self, world):\n return [agent for agent in world.agents if agent.adversary]\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n main_reward = self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)\n return main_reward\n\n def agent_reward(self, agent, world):\n # Agents are negatively rewarded if caught by adversaries\n rew = 0\n shape = False\n adversaries = self.adversaries(world)\n if shape: # reward can optionally be shaped (increased reward for increased distance from adversary)\n for adv in adversaries:\n rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))\n if agent.collide:\n for a in adversaries:\n if self.is_collision(a, agent):\n rew -= 10\n\n # agents are penalized for exiting the screen, so that they can be caught by the adversaries\n def bound(x):\n if x < 0.9:\n return 0\n if x < 1.0:\n return (x - 0.9) * 10\n return min(np.exp(2 * x - 
2), 10)\n for p in range(world.dim_p):\n x = abs(agent.state.p_pos[p])\n rew -= bound(x)\n\n return rew\n\n def adversary_reward(self, agent, world):\n # Adversaries are rewarded for collisions with agents\n rew = 0\n shape = False\n agents = self.good_agents(world)\n adversaries = self.adversaries(world)\n if shape: # reward can optionally be shaped (decreased reward for increased distance from agents)\n for adv in adversaries:\n rew -= 0.1 * min([np.sqrt(np.sum(np.square(a.state.p_pos - adv.state.p_pos))) for a in agents])\n if agent.collide:\n for ag in agents:\n for adv in adversaries:\n if self.is_collision(ag, adv):\n rew += 10\n return rew\n\n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n if not entity.boundary:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # communication of all other agents\n comm = []\n other_pos = []\n other_vel = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + other_vel)\n\n def done(self, agent, world):\n if world.time >= 50:\n return True\n else:\n return False"
] |
[
[
"numpy.square",
"numpy.concatenate",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] |
dmarx/lightning-flash
|
[
"4cda031c1f9c8d8754fd36b5720d2a5a7d866765"
] |
[
"tests/core/data/io/test_input_transform.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest.mock import Mock\n\nimport pytest\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data._utils.collate import default_collate\n\nfrom flash import DataModule\nfrom flash.core.data.io.input import InputFormat\nfrom flash.core.data.io.input_transform import (\n _InputTransformProcessor,\n _InputTransformSequential,\n DefaultInputTransform,\n)\nfrom flash.core.utilities.stages import RunningStage\n\n\nclass CustomInputTransform(DefaultInputTransform):\n def __init__(self):\n super().__init__(\n inputs={\n \"test\": Mock(return_value=\"test\"),\n InputFormat.TENSORS: Mock(return_value=\"tensors\"),\n },\n default_input=\"test\",\n )\n\n\ndef test_input_transform_processor_str():\n input_transform_processor = _InputTransformProcessor(\n Mock(name=\"input_transform\"),\n default_collate,\n torch.relu,\n torch.softmax,\n RunningStage.TRAINING,\n False,\n True,\n )\n assert str(input_transform_processor) == (\n \"_InputTransformProcessor:\\n\"\n \"\\t(per_sample_transform): FuncModule(relu)\\n\"\n \"\\t(collate_fn): FuncModule(default_collate)\\n\"\n \"\\t(per_batch_transform): FuncModule(softmax)\\n\"\n \"\\t(apply_per_sample_transform): False\\n\"\n \"\\t(on_device): True\\n\"\n \"\\t(stage): RunningStage.TRAINING\"\n )\n\n\ndef test_sequential_str():\n sequential = _InputTransformSequential(\n Mock(name=\"input_transform\"),\n torch.softmax,\n torch.as_tensor,\n torch.relu,\n RunningStage.TRAINING,\n True,\n )\n assert str(sequential) == (\n \"_InputTransformSequential:\\n\"\n \"\\t(pre_tensor_transform): FuncModule(softmax)\\n\"\n \"\\t(to_tensor_transform): FuncModule(as_tensor)\\n\"\n \"\\t(post_tensor_transform): FuncModule(relu)\\n\"\n \"\\t(assert_contains_tensor): True\\n\"\n \"\\t(stage): RunningStage.TRAINING\"\n )\n\n\ndef test_input_of_name():\n input_transform = CustomInputTransform()\n\n assert input_transform.input_of_name(\"test\")() == \"test\"\n assert input_transform.input_of_name(InputFormat.TENSORS)() == \"tensors\"\n assert input_transform.input_of_name(\"tensors\")() == \"tensors\"\n assert input_transform.input_of_name(\"default\")() == \"test\"\n\n with pytest.raises(MisconfigurationException, match=\"available data sources are: test, tensor\"):\n input_transform.input_of_name(\"not available\")\n\n\ndef test_available_inputs():\n input_transform = CustomInputTransform()\n\n assert InputFormat.TENSORS in input_transform.available_inputs()\n assert \"test\" in input_transform.available_inputs()\n assert len(input_transform.available_inputs()) == 3\n\n data_module = DataModule(input_transform=input_transform)\n\n assert InputFormat.TENSORS in data_module.available_inputs()\n assert \"test\" in data_module.available_inputs()\n assert len(data_module.available_inputs()) == 3\n\n\ndef test_check_transforms():\n transform = torch.nn.Identity()\n DefaultInputTransform(train_transform=transform)\n 
DefaultInputTransform(train_transform=[transform])\n"
] |
[
[
"torch.nn.Identity"
]
] |
jscarlson/restyle-encoder
|
[
"8b0c97a6e44ad244efe7bd77f80d7201817c314c"
] |
[
"models/mtcnn/mtcnn_pytorch/src/detector.py"
] |
[
"import numpy as np\nimport torch\nfrom .get_nets import PNet, RNet, ONet\nfrom .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square\nfrom .first_stage import run_first_stage\n\n\ndef detect_faces(image, min_face_size=20.0,\n thresholds=[0.6, 0.7, 0.8],\n nms_thresholds=[0.7, 0.7, 0.7]):\n \"\"\"\n Arguments:\n image: an instance of PIL.Image.\n min_face_size: a float number.\n thresholds: a list of length 3.\n nms_thresholds: a list of length 3.\n\n Returns:\n two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10],\n bounding boxes and facial landmarks.\n \"\"\"\n\n # LOAD MODELS\n pnet = PNet()\n rnet = RNet()\n onet = ONet()\n onet.eval()\n\n # BUILD AN IMAGE PYRAMID\n width, height = image.size\n min_length = min(height, width)\n\n min_detection_size = 12\n factor = 0.707 # sqrt(0.5)\n\n # scales for scaling the image\n scales = []\n\n # scales the image so that\n # minimum size that we can detect equals to\n # minimum face size that we want to detect\n m = min_detection_size / min_face_size\n min_length *= m\n\n factor_count = 0\n while min_length > min_detection_size:\n scales.append(m * factor ** factor_count)\n min_length *= factor\n factor_count += 1\n\n # STAGE 1\n\n # it will be returned\n bounding_boxes = []\n\n with torch.no_grad():\n # run P-Net on different scales\n for s in scales:\n boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0])\n bounding_boxes.append(boxes)\n\n # collect boxes (and offsets, and scores) from different scales\n bounding_boxes = [i for i in bounding_boxes if i is not None]\n bounding_boxes = np.vstack(bounding_boxes)\n\n keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])\n bounding_boxes = bounding_boxes[keep]\n\n # use offsets predicted by pnet to transform bounding boxes\n bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])\n # shape [n_boxes, 5]\n\n bounding_boxes = convert_to_square(bounding_boxes)\n bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])\n\n # STAGE 2\n\n img_boxes = get_image_boxes(bounding_boxes, image, size=24)\n img_boxes = torch.FloatTensor(img_boxes)\n\n output = rnet(img_boxes)\n offsets = output[0].data.numpy() # shape [n_boxes, 4]\n probs = output[1].data.numpy() # shape [n_boxes, 2]\n\n keep = np.where(probs[:, 1] > thresholds[1])[0]\n bounding_boxes = bounding_boxes[keep]\n bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))\n offsets = offsets[keep]\n\n keep = nms(bounding_boxes, nms_thresholds[1])\n bounding_boxes = bounding_boxes[keep]\n bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])\n bounding_boxes = convert_to_square(bounding_boxes)\n bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])\n\n # STAGE 3\n\n img_boxes = get_image_boxes(bounding_boxes, image, size=48)\n if len(img_boxes) == 0:\n return [], []\n img_boxes = torch.FloatTensor(img_boxes)\n output = onet(img_boxes)\n landmarks = output[0].data.numpy() # shape [n_boxes, 10]\n offsets = output[1].data.numpy() # shape [n_boxes, 4]\n probs = output[2].data.numpy() # shape [n_boxes, 2]\n\n keep = np.where(probs[:, 1] > thresholds[2])[0]\n bounding_boxes = bounding_boxes[keep]\n bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))\n offsets = offsets[keep]\n landmarks = landmarks[keep]\n\n # compute landmark points\n width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0\n height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0\n xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]\n landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * 
landmarks[:, 0:5]\n landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10]\n\n bounding_boxes = calibrate_box(bounding_boxes, offsets)\n keep = nms(bounding_boxes, nms_thresholds[2], mode='min')\n bounding_boxes = bounding_boxes[keep]\n landmarks = landmarks[keep]\n\n return bounding_boxes, landmarks\n"
] |
[
[
"numpy.expand_dims",
"numpy.round",
"torch.FloatTensor",
"torch.no_grad",
"numpy.where",
"numpy.vstack"
]
] |
raphael-sch/tensor2tensor
|
[
"6ad82d4001145348922e915d383e375c833a929c"
] |
[
"tensor2tensor/data_generators/gym_utils.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for openai gym.\"\"\"\n\n# Dependency imports\n\nimport gym\n\nimport numpy as np\n\nimport six\n\nfrom tensor2tensor.data_generators import image_utils\nfrom tensor2tensor.rl.envs import simulated_batch_env\n\n\nclass WarmupWrapper(gym.Wrapper):\n \"\"\"Warmup wrapper.\"\"\"\n\n def __init__(self, env, warm_up_examples=0):\n gym.Wrapper.__init__(self, env)\n self.warm_up_examples = warm_up_examples\n self.warm_up_action = 0\n self.observation_space = gym.spaces.Box(\n low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)\n\n def get_starting_data(self):\n self.reset()\n starting_observations, starting_actions, starting_rewards = [], [], []\n for _ in range(\n simulated_batch_env.SimulatedBatchEnv.NUMBER_OF_HISTORY_FRAMES):\n observation, rew, _, _ = self.env.step(self.warm_up_action)\n starting_observations.append(observation)\n starting_rewards.append(rew)\n starting_actions.append(self.warm_up_action)\n\n return starting_observations, starting_actions, starting_rewards\n\n def step(self, ac):\n action = ac\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset()\n observation = None\n for _ in range(self.warm_up_examples):\n observation, _, _, _ = self.env.step(self.warm_up_action)\n\n return observation\n\n\nclass PongWrapper(WarmupWrapper):\n \"\"\"Pong Wrapper.\"\"\"\n\n def __init__(self, env, warm_up_examples=0,\n action_space_reduction=False,\n reward_skip_steps=0,\n big_ball=False):\n super(PongWrapper, self).__init__(env, warm_up_examples=0)\n self.action_space_reduction = action_space_reduction\n if self.action_space_reduction:\n self.action_space = gym.spaces.Discrete(2)\n self.warm_up_examples = warm_up_examples\n self.observation_space = gym.spaces.Box(\n low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)\n self.reward_skip_steps = reward_skip_steps\n self.big_ball = big_ball\n\n def step(self, ac):\n action = ac\n if self.action_space_reduction:\n action = 2 if int(ac) == 0 else 5\n ob, rew, done, info = self.env.step(action)\n ob = self.process_observation(ob)\n if rew != 0 and self.reward_skip_steps != 0:\n for _ in range(self.reward_skip_steps):\n self.env.step(0)\n return ob, rew, done, info\n\n def reset(self, **kwargs):\n observation = super(PongWrapper, self).reset(**kwargs)\n observation = self.process_observation(observation)\n return observation\n\n def process_observation(self, obs):\n if self.big_ball:\n pos = PongWrapper.find_ball(obs)\n if pos is not None:\n x, y = pos\n obs[x-5:x+5, y-5:y+5, :] = 255\n\n return obs\n\n @staticmethod\n def find_ball(obs, default=None):\n ball_area = obs[37:193, :, 0]\n res = np.argwhere(ball_area == 236)\n if not res:\n return default\n else:\n x, y = res[0]\n x += 37\n return x, y\n\n\ndef wrapped_pong_factory(warm_up_examples=0, action_space_reduction=False,\n reward_skip_steps=0, big_ball=False):\n env = gym.make(\"PongDeterministic-v4\")\n env = env.env # Remove time_limit 
wrapper.\n env = PongWrapper(env, warm_up_examples=warm_up_examples,\n action_space_reduction=action_space_reduction,\n reward_skip_steps=reward_skip_steps,\n big_ball=big_ball)\n return env\n\n\ngym.envs.register(id=\"T2TPongWarmUp20RewSkip1000Steps-v1\",\n entry_point=lambda: wrapped_pong_factory( # pylint: disable=g-long-lambda\n warm_up_examples=20, reward_skip_steps=15),\n max_episode_steps=200)\n\n\ndef encode_image_to_png(image):\n encoded = six.next(\n image_utils.encode_images_as_png([image]))\n return encoded\n"
] |
[
[
"numpy.argwhere"
]
] |
ZhichaoOuyang/reviews4rec
|
[
"479160aba57271c1109b8857d3cb3a54881898fc"
] |
[
"MPCN/tf_models/exp_model2.py"
] |
[
"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nimport tensorflow as tf\nimport gzip\nimport json\nfrom tqdm import tqdm\nimport random\nfrom collections import Counter\nimport operator\nimport timeit\nimport time\n\nimport datetime\nfrom keras.preprocessing import sequence\n\nfrom .utilities import *\nfrom keras.utils import np_utils\nimport numpy as np\n\nfrom tylib.lib.att_op import *\nfrom tylib.lib.seq_op import *\nfrom tylib.lib.cnn import *\nfrom tylib.lib.compose_op import *\n\nfrom .mpcn import *\n\nclass ExpModel:\n ''' Base model class.\n Multitask - rating prediction and experience ranking\n '''\n def __init__(self, vocab_size, args, char_vocab=0, pos_vocab=0,\n mode='RATING+RANK', num_user=0, num_item=0):\n self.vocab_size = vocab_size\n self.char_vocab = char_vocab\n self.pos_vocab = pos_vocab\n self.graph = tf.Graph()\n self.args = args\n self.imap = {}\n self.inspect_op = []\n self.mode=mode\n self.write_dict = {}\n # For interaction data only (disabled and removed from this repo)\n self.num_user = num_user\n self.num_item = num_item\n print('Creating Model in [{}] mode'.format(self.mode))\n self.feat_prop = None\n if(self.args.init_type=='xavier'):\n self.initializer = tf.contrib.layers.xavier_initializer()\n elif(self.args.init_type=='normal'):\n self.initializer = tf.random_normal_initializer(0.0,\n self.args.init)\n elif(self.args.init_type=='uniform'):\n self.initializer = tf.random_uniform_initializer(\n maxval=self.args.init,\n minval=-self.args.init)\n\n self.cnn_initializer = tf.random_uniform_initializer(\n maxval=self.args.init,\n minval=-self.args.init)\n self.init = self.initializer\n self.temp = []\n self.att1, self.att2 = [],[]\n \n # build graph\n self.build_graph()\n\n def _get_pair_feed_dict(self, data, mode='training', lr=None):\n \"\"\" This is for pairwise ranking and not relevant to this repo.\n \"\"\"\n data = zip(*data)\n labels = data[-1]\n\n if(lr is None):\n lr = self.args.learn_rate\n\n feed_dict = {\n self.q1_inputs:data[self.imap['q1_inputs']],\n self.q2_inputs:data[self.imap['q2_inputs']],\n self.q1_len:data[self.imap['q1_len']],\n self.q2_len:data[self.imap['q2_len']],\n self.learn_rate:lr,\n self.dropout:self.args.dropout,\n self.rnn_dropout:self.args.rnn_dropout,\n self.emb_dropout:self.args.emb_dropout\n }\n if(mode=='training'):\n feed_dict[self.q3_inputs] = data[self.imap['q3_inputs']]\n feed_dict[self.q3_len]=data[self.imap['q3_len']]\n if(mode!='training'):\n feed_dict[self.dropout] = 1.0\n feed_dict[self.rnn_dropout] = 1.0\n feed_dict[self.emb_dropout] = 1.0\n if(self.args.features):\n feed_dict[self.pos_features] = data[6]\n if(mode=='training'):\n feed_dict[self.neg_features] = data[7]\n return feed_dict\n\n def _check_model_type(self):\n if('SOFT' in self.args.rnn_type):\n return 'point'\n elif('SIG_MSE' in self.args.rnn_type \\\n or 'RAW_MSE' in self.args.rnn_type):\n return 'point'\n else:\n return 'pair'\n\n def get_feed_dict(self, data, mode='training', lr=None):\n mdl_type = self._check_model_type()\n if(mdl_type=='point'):\n return self._get_point_feed_dict(data, mode=mode, lr=lr)\n else:\n return self._get_pair_feed_dict(data, mode=mode, lr=lr)\n\n def _get_point_feed_dict(self, data, mode='training', 
lr=None):\n \"\"\" This is the pointwise feed-dict that is actually used.\n \"\"\"\n data = zip(*data)\n labels = data[-1] # rating \n soft_labels = np.array([[1 if t == i else 0\n for i in range(self.args.num_class)] \\\n for t in labels]) # softmax label?\n sig_labels = labels\n\n if(lr is None):\n lr = self.args.learn_rate\n feed_dict = {\n self.q1_inputs:data[self.imap['q1_inputs']],\n self.q2_inputs:data[self.imap['q2_inputs']],\n self.q1_len:data[self.imap['q1_len']],\n self.q2_len:data[self.imap['q2_len']],\n self.learn_rate:lr,\n self.dropout:self.args.dropout,\n self.rnn_dropout:self.args.rnn_dropout,\n self.emb_dropout:self.args.emb_dropout,\n self.soft_labels:soft_labels,\n self.sig_labels:sig_labels\n }\n if('TNET' in self.args.rnn_type):\n # Use TransNet\n feed_dict[self.trans_inputs] = data[self.imap['trans_inputs']]\n feed_dict[self.trans_len] = data[self.imap['trans_len']]\n if('EXP' in self.args.rnn_type):\n # Add experience ranking\n feed_dict[self.user_idx] = data[self.imap['user_idx']]\n feed_dict[self.item_idx] = data[self.imap['item_idx']]\n feed_dict[self.pair_user_inputs2] = data[self.imap['pair_user_inputs2']]\n feed_dict[self.pair_user_len2] = data[self.imap['pair_user_len2']]\n feed_dict[self.exp_labels] = data[self.imap['exp_labels']]\n\n if(mode!='training'):\n feed_dict[self.dropout] = 1.0\n feed_dict[self.rnn_dropout] = 1.0\n feed_dict[self.emb_dropout] = 1.0\n if(self.args.features):\n feed_dict[self.pos_features] = data[6]\n return feed_dict\n\n def register_index_map(self, idx, target):\n self.imap[target] = idx\n\n # representation\n def _joint_representation(self, q1_embed, q2_embed, q1_len, q2_len, q1_max,\n q2_max, force_model=None, score=1,\n reuse=None, features=None, extract_embed=False,\n side='', c1_embed=None, c2_embed=None, p1_embed=None,\n p2_embed=None, i1_embed=None, i2_embed=None, o1_embed=None,\n o2_embed=None, o1_len=None, o2_len=None, q1_mask=None,\n q2_mask=None):\n \"\"\" Learns a joint representation given q1 and q2.\n \"\"\"\n\n print(\"Learning Repr [{}]\".format(side))\n print(self.q1_embed)\n print(self.q2_embed)\n\n # Extra projection layer\n if('HP' in self.args.rnn_type):\n # Review level Highway layer\n use_mode='HIGH'\n else:\n use_mode='FC'\n\n # projection - word_dim -> h_dim\n if(self.args.translate_proj==1):\n q1_embed = projection_layer(\n q1_embed,\n self.args.rnn_size,\n name='trans_proj',\n activation=tf.nn.relu,\n initializer=self.initializer,\n dropout=self.args.dropout,\n reuse=reuse,\n use_mode=use_mode,\n num_layers=self.args.num_proj,\n return_weights=True,\n is_train=self.is_train\n )\n q2_embed = projection_layer(\n q2_embed,\n self.args.rnn_size,\n name='trans_proj',\n activation=tf.nn.relu,\n initializer=self.initializer,\n dropout=self.args.dropout,\n reuse=True,\n use_mode=use_mode,\n num_layers=self.args.num_proj,\n is_train=self.is_train\n )\n else:\n self.proj_weights = self.embeddings\n\n if(self.args.all_dropout):\n q1_embed = tf.nn.dropout(q1_embed, self.dropout)\n q2_embed = tf.nn.dropout(q2_embed, self.dropout)\n\n representation = None\n att1, att2 = None, None\n if(force_model is not None):\n rnn_type = force_model\n else:\n rnn_type = self.args.rnn_type\n rnn_size = self.args.rnn_size\n q1_output = self.learn_single_repr(q1_embed, q1_len, q1_max,\n rnn_type,\n reuse=reuse, pool=False,\n name='main', mask=q1_mask)\n q2_output = self.learn_single_repr(q2_embed, q2_len, q2_max,\n rnn_type,\n reuse=True, pool=False,\n name='main', mask=q2_mask)\n 
print(\"==============================================\")\n print('Single Repr:')\n print(q1_output)\n print(q2_output)\n print(\"===============================================\")\n if('DUAL' in rnn_type):\n # D-ATT model\n q1_output = dual_attention(q1_output, self.args.rnn_size,\n initializer=self.initializer,\n reuse=reuse, dropout=self.dropout)\n q2_output = dual_attention(q2_output, self.args.rnn_size,\n initializer=self.initializer,\n reuse=True, dropout=self.dropout)\n if(side=='POS'):\n self.temp = []\n elif('MPCN' in rnn_type):\n # activate MPCN model\n q1_output, q2_output = multi_pointer_coattention_networks(\n self,\n q1_output, q2_output,\n q1_len, q2_len,\n o1_embed, o2_embed,\n o1_len, o2_len,\n rnn_type=self.args.rnn_type,\n reuse=reuse)\n else:\n if('MEAN' in rnn_type):\n # Standard Mean Over Time Baseline\n q1_len = tf.expand_dims(q1_len, 1)\n q2_len = tf.expand_dims(q2_len, 1)\n q1_output = mean_over_time(q1_output, q1_len)\n q2_output = mean_over_time(q2_output, q2_len)\n elif('SUM' in rnn_type):\n q1_output = tf.reduce_sum(q1_output, 1)\n q2_output = tf.reduce_sum(q2_output, 1)\n elif('MAX' in rnn_type):\n q1_output = tf.reduce_max(q1_output, 1)\n q2_output = tf.reduce_max(q2_output, 1)\n elif('LAST' in rnn_type):\n q1_output = last_relevant(q1_output, q1_len)\n q2_output = last_relevant(q2_output, q2_len)\n elif('MM' in rnn_type):\n # max mean pooling\n q1_len = tf.expand_dims(q1_len, 1)\n q2_len = tf.expand_dims(q2_len, 1)\n q1_mean = mean_over_time(q1_output, q1_len)\n q2_mean = mean_over_time(q2_output, q2_len)\n q1_max = tf.reduce_max(q1_output, 1)\n q2_max = tf.reduce_max(q2_output, 1)\n q1_output = tf.concat([q1_mean, q1_max], 1)\n q2_output = tf.concat([q2_mean, q2_max], 1)\n try:\n # For summary statistics\n self.max_norm = tf.reduce_max(tf.norm(q1_output,\n ord='euclidean',\n keep_dims=True, axis=1))\n except:\n self.max_norm = 0\n\n if(extract_embed):\n self.q1_extract = q1_output\n self.q2_extract = q2_output\n\n q1_output = tf.nn.dropout(q1_output, self.dropout)\n q2_output = tf.nn.dropout(q2_output, self.dropout)\n\n # \n if(self.mode=='HREC'):\n # Use Rec Style output\n if('TNET' not in self.args.rnn_type):\n output = self._rec_output(q1_output, q2_output,\n reuse=reuse,\n side=side)\n elif(\"TNET\" in self.args.rnn_type):\n # Learn Repr with CNN\n input_vec = tf.concat([q1_output, q2_output], 1)\n dim = q1_output.get_shape().as_list()[1]\n trans_output = ffn(input_vec, dim,\n self.initializer, name='transform',\n reuse=reuse,\n num_layers=2,\n dropout=None, activation=tf.nn.tanh)\n trans_cnn = self.learn_single_repr(self.trans_embed,\n self.trans_len,\n self.args.smax * 2,\n rnn_type,\n reuse=True, pool=False,\n name='main')\n trans_cnn = tf.reduce_max(trans_cnn, 1)\n self.trans_loss = tf.nn.l2_loss(trans_output - trans_cnn)\n # Alternative predict op using transform\n output = self._rec_output(trans_output, None,\n reuse=reuse,\n side=side,\n name='target')\n\n representation = output\n return output, representation, att1, att2\n\n def learn_single_repr(self, q1_embed, q1_len, q1_max, rnn_type,\n reuse=None, pool=False, name=\"\", mask=None):\n \"\"\" This is the single sequence encoder function.\n rnn_type controls what type of encoder is used.\n Supports neural bag-of-words (NBOW) and CNN encoder\n \"\"\"\n if('NBOW' in rnn_type):\n q1_output = tf.reduce_sum(q1_embed, 1)\n if(pool):\n return q1_embed, q1_output\n elif('CNN' in rnn_type):\n q1_output = build_raw_cnn(q1_embed, self.args.rnn_size,\n filter_sizes=3,\n initializer=self.initializer,\n 
dropout=self.rnn_dropout, reuse=reuse, name=name) # reuse and name?\n if(pool):\n q1_output = tf.reduce_max(q1_output, 1)\n return q1_embed, q1_output\n #return q1_output, q1_output\n else: # if rnn_type is some kind of rnn, do nothing?\n q1_output = q1_embed\n\n return q1_output\n\n def _rec_output(self, q1_output, q2_output, reuse=None, side=\"\",\n name=''):\n \"\"\" This function supports the final layer outputs of\n recommender models.\n\n Four options: 'DOT','MLP','MF' and 'FM'\n (should be self-explanatory)\n \"\"\"\n print(\"Rec Output\")\n print(q1_output)\n dim = q1_output.get_shape().as_list()[1]\n with tf.variable_scope('rec_out', reuse=reuse) as scope:\n if('DOT' in self.args.rnn_type):\n output = q1_output * q2_output\n output = tf.reduce_sum(output, 1, keep_dims=True)\n elif('MLP' in self.args.rnn_type):\n output = tf.concat([q1_output, q2_output,\n q1_output * q2_output], 1)\n output = ffn(output, self.args.hdim,\n self.initializer,\n name='ffn', reuse=None,\n dropout=self.dropout,\n activation=tf.nn.relu, num_layers=2)\n output = linear(output, 1, self.initializer)\n elif('MF' in self.args.rnn_type):\n output = q1_output * q2_output\n h = tf.get_variable(\n \"hidden\", [dim, 1],\n initializer=self.initializer,\n )\n output = tf.matmul(output, h)\n elif('FM' in self.args.rnn_type):\n if(q2_output is None):\n input_vec = q1_output\n else:\n input_vec = tf.concat([q1_output, q2_output], 1)\n input_vec = tf.nn.dropout(input_vec, self.dropout)\n output, _ = build_fm(input_vec, k=self.args.factor,\n reuse=reuse,\n name=name,\n initializer=self.initializer,\n reshape=False)\n\n if('SIG' in self.args.rnn_type):\n output = tf.nn.sigmoid(output)\n return output\n\n def prepare_hierarchical_input(self):\n \"\"\" Supports hierarchical data input\n Converts word level -> sentence level\n \"\"\"\n # tylib/lib/seq_op/clip_sentence\n # q1_inputs, self.qmax = clip_sentence(self.q1_inputs, self.q1_len)\n # q2_inputs, self.a1max = clip_sentence(self.q2_inputs, self.q2_len)\n # q3_inputs, self.a2max = clip_sentence(self.q3_inputs, self.q3_len)\n\n # Build word-level masks\n self.q1_mask = tf.cast(self.q1_inputs, tf.bool)\n self.q2_mask = tf.cast(self.q2_inputs, tf.bool)\n self.q3_mask = tf.cast(self.q3_inputs, tf.bool)\n\n def make_hmasks(inputs, smax):\n # Hierarchical Masks\n # Inputs are bsz x (dmax * smax)\n inputs = tf.reshape(inputs,[-1, smax]) # -> (bsz * dmax) x smax\n masked_inputs = tf.cast(inputs, tf.bool)\n return masked_inputs\n\n # Build review-level masks\n self.q1_hmask = make_hmasks(self.q1_inputs, self.args.smax)\n self.q2_hmask = make_hmasks(self.q2_inputs, self.args.smax)\n self.q3_hmask = make_hmasks(self.q3_inputs, self.args.smax)\n\n with tf.device('/cpu:0'):\n q1_embed = tf.nn.embedding_lookup(self.embeddings,\n self.q1_inputs)\n q2_embed = tf.nn.embedding_lookup(self.embeddings,\n self.q2_inputs)\n q3_embed = tf.nn.embedding_lookup(self.embeddings,\n self.q3_inputs)\n\n print(\"=============================================\")\n # This is found in nn.py in tylib\n print(\"Hierarchical Flattening\")\n q1_embed, q1_len = hierarchical_flatten(q1_embed,\n self.q1_len,\n self.args.smax)\n q2_embed, q2_len = hierarchical_flatten(q2_embed,\n self.q2_len,\n self.args.smax)\n q3_embed, q3_len = hierarchical_flatten(q3_embed,\n self.q3_len,\n self.args.smax)\n # After flatten -> (bsz * dmax) x smax x dim\n\n # o_emb is q_emb before the learn_single_repr layer\n self.o1_embed = q1_embed\n self.o2_embed = q2_embed\n self.o3_embed = q3_embed\n self.o1_len = q1_len\n self.o2_len = 
q2_len\n self.o3_len = q3_len\n _, q1_embed = self.learn_single_repr(q1_embed, q1_len, self.args.smax,\n self.args.base_encoder,\n reuse=None, pool=True,\n name='sent', mask=self.q1_hmask)\n _, q2_embed = self.learn_single_repr(q2_embed, q2_len, self.args.smax,\n self.args.base_encoder,\n reuse=True, pool=True,\n name='sent', mask=self.q2_hmask)\n _, q3_embed = self.learn_single_repr(q3_embed, q3_len, self.args.smax,\n self.args.base_encoder,\n reuse=True, pool=True,\n name='sent', mask=self.q3_hmask)\n # According to the paper, each review is represented as a sum of its constituent word embeddings\n # Therefore, q_emb is summed over seq_len dimension -> (bsz * dmax) x dim \n _dim = q1_embed.get_shape().as_list()[1]\n q1_embed = tf.reshape(q1_embed, [-1, self.args.dmax, _dim])\n q2_embed = tf.reshape(q2_embed, [-1, self.args.dmax, _dim])\n q3_embed = tf.reshape(q3_embed, [-1, self.args.dmax, _dim])\n self.q1_embed = q1_embed\n self.q2_embed = q2_embed\n self.q3_embed = q3_embed\n # set value\n self.qmax = self.args.dmax\n self.a1max = self.args.dmax\n self.a2max = self.args.dmax\n # Doesn't support any of these yet\n self.c1_cnn, self.c2_cnn, self.c3_cnn = None, None, None\n self.p1_pos, self.p2_pos, self.p3_pos = None, None, None\n if('TNET' in self.args.rnn_type):\n t_inputs, _ = clip_sentence(self.trans_inputs, self.trans_len)\n self.trans_embed = tf.nn.embedding_lookup(self.embeddings,\n t_inputs)\n print(\"=================================================\")\n\n # prepare flat input\n def prepare_inputs(self):\n \"\"\" Prepares Input\n \"\"\"\n #q1_inputs, self.qmax = clip_sentence(self.q1_inputs, self.q1_len)\n #q2_inputs, self.a1max = clip_sentence(self.q2_inputs, self.q2_len)\n #q3_inputs, self.a2max = clip_sentence(self.q3_inputs, self.q3_len)\n q1_inputs = self.q1_inputs\n q2_inputs = self.q2_inputs\n q3_inputs = self.q3_inputs\n self.qmax = self.args.dmax * self.args.smax\n self.a1max = self.args.dmax * self.args.smax\n self.a2max = self.args.dmax * self.args.smax\n\n self.q1_mask = tf.cast(q1_inputs, tf.bool)\n self.q2_mask = tf.cast(q2_inputs, tf.bool)\n self.q3_mask = tf.cast(q3_inputs, tf.bool)\n\n with tf.device('/cpu:0'):\n q1_embed = tf.nn.embedding_lookup(self.embeddings,\n q1_inputs)\n q2_embed = tf.nn.embedding_lookup(self.embeddings,\n q2_inputs)\n q3_embed = tf.nn.embedding_lookup(self.embeddings,\n q3_inputs)\n\n if(self.args.all_dropout):\n # By default, this is disabled\n q1_embed = tf.nn.dropout(q1_embed, self.emb_dropout)\n q2_embed = tf.nn.dropout(q2_embed, self.emb_dropout)\n q3_embed = tf.nn.dropout(q3_embed, self.emb_dropout)\n\n # Ignore these. 
:)\n self.c1_cnn, self.c2_cnn, self.c3_cnn = None, None, None\n self.p1_pos, self.p2_pos, self.p3_pos = None, None, None\n\n if('TNET' in self.args.rnn_type):\n t_inputs, _ = clip_sentence(self.trans_inputs, self.trans_len)\n self.trans_embed = tf.nn.embedding_lookup(self.embeddings,\n t_inputs)\n\n if('EXP' in self.args.rnn_type):\n #p_user_inputs1, self.pair_user_q1max = clip_sentence(self.pair_user_inputs1, self.pair_user_len1)\n #p_user_inputs2, self.pair_user_q2max = clip_sentence(self.pair_user_inputs2, self.pair_user_len2)\n p_user_inputs2 = self.pair_user_inputs2\n self.pair_user_q2max = self.args.dmax * self.args.smax \n pair_user_q2_embed = tf.nn.embedding_lookup(self.embeddings,\n p_user_inputs2)\n self.pair_user_q2_embed = tf.nn.dropout(pair_user_q2_embed, self.emb_dropout)\n self.pair_user_q2_mask = tf.cast(p_user_inputs2, tf.bool)\n\n self.q1_embed = q1_embed\n self.q2_embed = q2_embed\n self.q3_embed = q3_embed\n\n def build_graph(self):\n ''' Builds Computational Graph\n '''\n if(self.mode=='HREC' and self.args.base_encoder!='Flat'): # hierarchical model and not flat base_encoder - then len is 2d\n len_shape = [None, None]\n else:\n len_shape = [None]\n\n print(\"Building placeholders with shape={}\".format(len_shape))\n\n with self.graph.as_default():\n self.is_train = tf.get_variable(\"is_train\",\n shape=[],\n dtype=tf.bool,\n trainable=False)\n self.true = tf.constant(True, dtype=tf.bool)\n self.false = tf.constant(False, dtype=tf.bool)\n with tf.name_scope('q1_input'):\n self.q1_inputs = tf.placeholder(tf.int32, shape=[None,\n self.args.qmax], # if qmax changes, tensor shape will also change \n name='q1_inputs')\n with tf.name_scope('q2_input'):\n self.q2_inputs = tf.placeholder(tf.int32, shape=[None,\n self.args.amax],\n name='q2_inputs')\n with tf.name_scope('q3_input'): # could not use\n # supports pairwise mode.\n self.q3_inputs = tf.placeholder(tf.int32, shape=[None,\n self.args.amax],\n name='q3_inputs')\n\n\n self.pair_user_q2max = self.args.qmax\n # if('EXP' in self.args.rnn_type):\n # user_id\n with tf.name_scope('user_idx'):\n self.user_idx = tf.placeholder(tf.int32, shape=[None], name='user_idx')\n # item_id\n with tf.name_scope('item_idx'):\n self.item_idx = tf.placeholder(tf.int32, shape=[None], name='item_idx')\n\n # add pair input\n with tf.name_scope('pair_user_input2'):\n self.pair_user_inputs2 = tf.placeholder(tf.int32, shape=[None,\n self.pair_user_q2max],\n name='pair_user_inputs2')\n with tf.name_scope('pair_user_lengths2'):\n self.pair_user_len2 = tf.placeholder(tf.int32, shape=len_shape)\n\n with tf.name_scope('exp_labels'):\n self.exp_labels = tf.placeholder(tf.float32, shape=[None], name='exp_labels')\n\n with tf.name_scope('dropout'):\n self.dropout = tf.placeholder(tf.float32,\n name='dropout')\n self.rnn_dropout = tf.placeholder(tf.float32,\n name='rnn_dropout')\n self.emb_dropout = tf.placeholder(tf.float32,\n name='emb_dropout')\n with tf.name_scope('q1_lengths'):\n self.q1_len = tf.placeholder(tf.int32, shape=len_shape)\n with tf.name_scope('q2_lengths'):\n self.q2_len = tf.placeholder(tf.int32, shape=len_shape)\n with tf.name_scope('q3_lengths'):\n self.q3_len = tf.placeholder(tf.int32, shape=len_shape)\n with tf.name_scope('learn_rate'):\n self.learn_rate = tf.placeholder(tf.float32, name='learn_rate')\n if(self.args.pretrained==1):\n self.emb_placeholder = tf.placeholder(tf.float32,\n [self.vocab_size, self.args.emb_size])\n\n with tf.name_scope(\"soft_labels\"):\n # softmax cross entropy (not used here)\n data_type = tf.int32\n 
self.soft_labels = tf.placeholder(data_type,\n shape=[None, self.args.num_class],\n name='softmax_labels')\n\n with tf.name_scope(\"sig_labels\"):\n # sigmoid cross entropy\n self.sig_labels = tf.placeholder(tf.float32,\n shape=[None],\n name='sigmoid_labels')\n self.sig_target = tf.expand_dims(self.sig_labels, 1)\n\n self.batch_size = tf.shape(self.q1_inputs)[0]\n\n with tf.variable_scope('embedding_layer'):\n if(self.args.pretrained==1):\n self.embeddings = tf.Variable(tf.constant(\n 0.0, shape=[self.vocab_size,\n self.args.emb_size]), \\\n trainable=self.args.trainable,\n name=\"embeddings\")\n self.embeddings_init = self.embeddings.assign(\n self.emb_placeholder)\n else:\n self.embeddings = tf.get_variable('embedding',\n [self.vocab_size,\n self.args.emb_size],\n initializer=self.initializer)\n\n self.i1_embed, self.i2_embed, self.i3_embed = None, None, None\n\n if('TNET' in self.args.rnn_type):\n self.trans_inputs = tf.placeholder(tf.int32, shape=[None,\n self.args.smax * 2],\n name='trans_inputs')\n self.trans_len = tf.placeholder(tf.int32, shape=[None])\n\n # prepare inputs\n if(self.mode=='HREC' and self.args.base_encoder!='Flat'):\n # Hierarchical mode\n self.prepare_hierarchical_input() # build o1, o2, o3?\n\n q1_len = tf.cast(tf.count_nonzero(self.q1_len, axis=1),\n tf.int32)\n q2_len = tf.cast(tf.count_nonzero(self.q2_len, axis=1),\n tf.int32)\n q3_len = tf.cast(tf.count_nonzero(self.q3_len, axis=1),\n tf.int32)\n else:\n print(\"Flat Mode..\")\n self.prepare_inputs()\n q1_len = self.q1_len\n q2_len = self.q2_len\n q3_len = self.q3_len\n # o_emb are hierarchical embeddings. therefore in the flat mode, they do not exist!!!\n self.o1_embed = None\n self.o2_embed = None\n self.o3_embed = None\n self.o1_len = None\n self.o2_len = None\n self.o3_len = None\n\n # build model\n # experience-aware latent factor model\n # first version - only consider text to predict experience\n\n rnn_type = self.args.rnn_type\n\n # reuse?\n # user text -> experience emb\n _, q1_output = self.learn_single_repr(self.q1_embed, q1_len, self.qmax,\n rnn_type,\n reuse=None, \n pool=True,\n name='main', mask=self.q1_mask)\n #q2_output = self.learn_single_repr(q2_embed, q2_len, q2_max,\n # rnn_type,\n # reuse=True, pool=False,\n # name='main', mask=q2_mask)\n\n _, pair_user_q2_output = self.learn_single_repr(self.pair_user_q2_embed, self.pair_user_len2, self.pair_user_q2max,\n rnn_type,\n reuse=True, \n pool=True,\n name='main', mask=self.pair_user_q2_mask)\n\n print(\"---Single Representation---\")\n\n # rating\n with tf.variable_scope('user_embedding_layer'):\n #self.P = tf.Variable(tf.random_normal([self.num_user, self.args.factor], stddev=0.01))\n self.P = tf.Variable(tf.random_uniform([self.num_user, self.args.factor], maxval=0.1))\n with tf.variable_scope('item_embedding_layer'):\n #self.Q = tf.Variable(tf.random_normal([self.num_item, self.args.factor], stddev=0.01))\n self.Q = tf.Variable(tf.random_uniform([self.num_item, self.args.factor], maxval=0.1))\n\n with tf.variable_scope('user_bias'):\n #self.B_U = tf.Variable(tf.random_normal([self.num_user], stddev=0.01))\n self.B_U = tf.Variable(tf.random_uniform([self.num_user], maxval=0.1))\n with tf.variable_scope('item_bias'):\n #self.B_I = tf.Variable(tf.random_normal([self.num_item], stddev=0.01))\n self.B_I = tf.Variable(tf.random_uniform([self.num_item], maxval=0.1))\n with tf.variable_scope('global_bias'):\n #self.B_G = tf.Variable(tf.random_normal([1], stddev=0.01))\n self.B_G = tf.Variable(tf.random_uniform([1], maxval=0.1))\n\n 
user_latent_factor = tf.nn.embedding_lookup(self.P, self.user_idx)\n item_latent_factor = tf.nn.embedding_lookup(self.Q, self.item_idx)\n user_bias = tf.nn.embedding_lookup(self.B_U, self.user_idx)\n item_bias = tf.nn.embedding_lookup(self.B_I, self.item_idx)\n\n # use q1_output to learn experience emb, then concatenate with latent factor model parameters\n exp_B_G = tf.concat([q1_output, tf.tile(tf.expand_dims(self.B_G, 1), [self.batch_size,1])], 1)\n exp_B_G = linear(exp_B_G, 1, self.initializer, name='global_proj')\n exp_user_bias = tf.concat([q1_output, tf.expand_dims(user_bias, 1)], 1)\n exp_user_bias = linear(exp_user_bias, 1, self.initializer, name='user_proj')\n exp_item_bias = tf.concat([q1_output, tf.expand_dims(item_bias, 1)], 1)\n exp_item_bias = linear(exp_item_bias, 1, self.initializer, name='item_proj')\n exp_user_latent_factor = tf.concat([q1_output, user_latent_factor], 1)\n exp_user_latent_factor = linear(exp_user_latent_factor, 1, self.initializer, name='user_latent_proj')\n exp_item_latent_factor = tf.concat([q1_output, item_latent_factor], 1)\n exp_item_latent_factor = linear(exp_item_latent_factor, 1, self.initializer, name='item_latent_proj')\n\n self.output_pos = tf.reduce_sum(tf.multiply(exp_user_latent_factor, exp_item_latent_factor), 1) + exp_user_bias + exp_item_bias + exp_B_G\n\n # experience \n '''\n exp_output_pos = ffn(q1_output, self.args.hdim,\n self.initializer,\n name='ffn', reuse=None,\n dropout=self.dropout,\n activation=tf.nn.relu, num_layers=1)'''\n exp_output_pos = q1_output\n self.exp_output_pos = linear(exp_output_pos, 1, self.initializer, name='exp_pos_proj')\n '''\n exp_output_neg = ffn(pair_user_q2_output, self.args.hdim,\n self.initializer,\n name='ffn', reuse=True,\n dropout=self.dropout,\n activation=tf.nn.relu, num_layers=1)'''\n exp_output_neg = pair_user_q2_output\n #self.exp_output_neg = linear(exp_output_neg, 1, self.initializer, name='exp_neg_proj')\n self.exp_output_neg = linear(exp_output_neg, 1, self.initializer, name='exp_pos_proj', reuse=True)\n\n\n # Define loss and optimizer\n with tf.name_scope(\"train\"):\n with tf.name_scope(\"cost_function\"):\n if(\"SOFT\" in self.args.rnn_type):\n target = self.soft_labels\n if('POINT' in self.args.rnn_type):\n target = tf.argmax(target, 1)\n target = tf.expand_dims(target, 1)\n target = tf.cast(target, tf.float32)\n ce = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.output_pos,\n labels=target)\n else:\n ce = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=self.output_pos,\n labels=tf.stop_gradient(target))\n self.cost = tf.reduce_mean(ce)\n elif('RAW_MSE' in self.args.rnn_type):\n sig = self.output_pos\n target = tf.expand_dims(self.sig_labels, 1)\n self.cost = tf.reduce_mean(\n tf.square(tf.subtract(target, sig)))\n # add experience ranking loss\n if('EXP' in self.args.rnn_type):\n self.exp_cost = tf.reduce_mean(\n -tf.log(tf.nn.sigmoid(\n (self.exp_output_pos-self.exp_output_neg) * self.exp_labels)))\n self.cost += self.exp_cost\n elif('LOG' in self.args.rnn_type):\n # BPR loss for ranking\n self.cost = tf.reduce_mean(\n -tf.log(tf.nn.sigmoid(\n self.output_pos-self.output_neg)))\n else:\n # Hinge loss for ranking\n self.hinge_loss = tf.maximum(0.0,(\n self.args.margin - self.output_pos \\\n + self.output_neg))\n\n self.cost = tf.reduce_sum(self.hinge_loss)\n\n with tf.name_scope('regularization'):\n if(self.args.l2_reg>0):\n vars = tf.trainable_variables()\n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars \\\n if 'bias' not in v.name ])\n lossL2 *= self.args.l2_reg\n 
self.cost += lossL2\n\n # add another experience ranking loss?\n\n # add cost function\n tf.summary.scalar(\"cost_function\", self.cost)\n\n global_step = tf.Variable(0, trainable=False)\n\n if(self.args.dev_lr>0):\n lr = self.learn_rate\n else:\n if(self.args.decay_steps>0):\n lr = tf.train.exponential_decay(self.args.learn_rate,\n global_step,\n self.args.decay_steps,\n self.args.decay_lr,\n staircase=self.args.decay_stairs)\n elif(self.args.decay_lr>0 and self.args.decay_epoch>0):\n decay_epoch = self.args.decay_epoch\n lr = tf.train.exponential_decay(self.args.learn_rate,\n global_step,\n decay_epoch * self.args.batch_size,\n self.args.decay_lr, staircase=True)\n else:\n lr = self.args.learn_rate\n\n control_deps = []\n\n with tf.name_scope('optimizer'):\n if(self.args.opt=='SGD'):\n self.opt = tf.train.GradientDescentOptimizer(\n learning_rate=lr)\n elif(self.args.opt=='Adam'):\n self.opt = tf.train.AdamOptimizer(\n learning_rate=lr)\n elif(self.args.opt=='Adadelta'):\n self.opt = tf.train.AdadeltaOptimizer(\n learning_rate=lr)\n elif(self.args.opt=='Adagrad'):\n self.opt = tf.train.AdagradOptimizer(\n learning_rate=lr)\n elif(self.args.opt=='RMS'):\n self.opt = tf.train.RMSPropOptimizer(\n learning_rate=lr)\n elif(self.args.opt=='Moment'):\n self.opt = tf.train.MomentumOptimizer(lr, 0.9)\n\n # Use SGD at the end for better local minima\n self.opt2 = tf.train.GradientDescentOptimizer(\n learning_rate=self.args.wiggle_lr)\n tvars = tf.trainable_variables()\n def _none_to_zero(grads, var_list):\n return [grad if grad is not None else tf.zeros_like(var)\n for var, grad in zip(var_list, grads)]\n if(self.args.clip_norm>0):\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(self.cost, tvars),\n self.args.clip_norm)\n with tf.name_scope('gradients'):\n gradients = self.opt.compute_gradients(self.cost)\n def ClipIfNotNone(grad):\n if grad is None:\n return grad\n grad = tf.clip_by_value(grad, -10, 10, name=None)\n return tf.clip_by_norm(grad, self.args.clip_norm)\n if(self.args.clip_norm>0):\n clip_g = [(ClipIfNotNone(grad), var) for grad, var in gradients]\n else:\n clip_g = [(grad,var) for grad,var in gradients]\n\n # Control dependency for center loss\n with tf.control_dependencies(control_deps):\n self.train_op = self.opt.apply_gradients(clip_g,\n global_step=global_step)\n self.wiggle_op = self.opt2.apply_gradients(clip_g,\n global_step=global_step)\n else:\n with tf.control_dependencies(control_deps):\n self.train_op = self.opt.minimize(self.cost)\n self.wiggle_op = self.opt2.minimize(self.cost)\n\n self.grads = _none_to_zero(tf.gradients(self.cost,tvars), tvars)\n # grads_hist = [tf.summary.histogram(\"grads_{}\".format(i), k) for i, k in enumerate(self.grads) if k is not None]\n self.merged_summary_op = tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES)\n # model_stats()\n\n # for Inference\n self.predict_op = self.output_pos\n self.predict_op2 = self.exp_output_pos\n if('RAW_MSE' in self.args.rnn_type):\n self.predict_op = tf.clip_by_value(self.predict_op, 1, 5)\n if('SOFT' in self.args.rnn_type):\n if('POINT' in self.args.rnn_type):\n predict_neg = 1 - self.predict_op\n self.predict_op = tf.concat([predict_neg,\n self.predict_op], 1)\n else:\n self.predict_op = tf.nn.softmax(self.output_pos)\n self.predictions = tf.argmax(self.predict_op, 1)\n self.correct_prediction = tf.equal(tf.argmax(self.predict_op, 1),\n tf.argmax(self.soft_labels, 1))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction,\n \"float\"))\n"
] |
[
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.count_nonzero",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.nn.l2_loss",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.train.exponential_decay",
"tensorflow.clip_by_norm",
"tensorflow.stop_gradient",
"tensorflow.subtract",
"tensorflow.train.MomentumOptimizer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.random_normal_initializer",
"tensorflow.nn.dropout",
"tensorflow.norm",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.train.AdagradOptimizer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.zeros_like",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.nn.embedding_lookup",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
]
] |
vogelta/vogelta.github.io
|
[
"24c354db961e5e620557c8f4d4d960bea05f3f7d"
] |
[
"Parameters/MNIST_MinCOOL.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom data_wrappers import mnist_wrapper\nfrom utils import Layer, cool_layer\n\n\ndata_wrap = mnist_wrapper\nimage_dim = [28,28,1]\nn_classes = 10\n\nbatch_size = 50\nn_batches = 40000\ncheck_every = 1000\n\noptimizer = tf.train.AdamOptimizer\nlearning_rate = 4e-4\ndecay_function = tf.train.piecewise_constant\ndecay_params = [[20000], [learning_rate, 2e-4]]\n\nw_init = tf.truncated_normal_initializer(stddev=0.1)\nb_init = tf.constant_initializer(0.1)\ndoo = 5\n\nlayers_to_track_dead_units = [0,2,5]\nlayers = [Layer(tf.contrib.layers.convolution2d,\n {'num_outputs': 32, \n 'kernel_size': 5, \n 'activation_fn': tf.nn.relu,\n 'weights_initializer': w_init,\n 'biases_initializer': b_init,\n 'trainable': True}),\n Layer(tf.contrib.layers.max_pool2d, {'kernel_size': 2}),\n Layer(tf.contrib.layers.convolution2d,\n {'num_outputs': 64, \n 'kernel_size': 5, \n 'activation_fn': tf.nn.relu,\n 'weights_initializer': w_init,\n 'biases_initializer': b_init,\n 'trainable': True}),\n Layer(tf.contrib.layers.max_pool2d, {'kernel_size': 2}),\n Layer(tf.contrib.layers.flatten),\n Layer(tf.contrib.layers.fully_connected,\n {'num_outputs': 1024,\n 'activation_fn': tf.nn.relu,\n 'weights_initializer': w_init,\n 'biases_initializer': b_init,\n 'trainable': True}),\n Layer(tf.contrib.layers.fully_connected,\n {'num_outputs': n_classes*doo,\n 'activation_fn': tf.nn.softmax,\n 'weights_initializer': w_init,\n 'biases_initializer': b_init,\n 'trainable': True}),\n Layer(cool_layer, {'doo': doo, 'mode': 'min'}, ['is_training'])]\n"
] |
[
[
"tensorflow.constant_initializer",
"tensorflow.truncated_normal_initializer"
]
] |
gem763/stockpricer
|
[
"f391617725897f5ad47e2e8cf1cfe22ffd657107"
] |
[
"stockpricer/model.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import PCA, KernelPCA, FastICA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.preprocessing import StandardScaler, Imputer, MinMaxScaler\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.pipeline import Pipeline as skpipe\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n\nclass Pricer:\n def __init__(self, prices_df, scaler=StandardScaler(), reducer=PCA(n_components=0.9)):\n self.p = prices_df\n self.log_p = np.log(prices_df)\n self.scaler = scaler\n self.reducer = reducer\n self.T, self.log_p_model = self._modeling()\n \n\n #@classmethod\n #def from_db(cls, asof, field='adj', n_bars=250, n_freq=1, univ='k200', scaler=StandardScaler(), reducer=PCA(n_components=0.9)):\n # p = read_trailing_to(field, asof, n_bars, n_freq, univ).dropna(axis=1, how='any')\n # return cls(p, scaler, reducer)\n \n \n def _modeling(self):\n log_p_scaled = self.scaler.fit_transform(self.log_p)\n T = self.reducer.fit_transform(log_p_scaled)\n log_p_model = self.scaler.inverse_transform(self.reducer.inverse_transform(T))\n # 참고: reducer.inverse_transform(T) = T.dot(reducer.components_)\n return T, pd.DataFrame(log_p_model, index=self.log_p.index, columns=self.log_p.columns)\n\n \n def plot_of(self, target, scale=None):\n if type(target) is str:\n target = self.p.columns.get_loc(target)\n\n symbol = self.p.columns[target]\n params = dict(legend=True, figsize=(8,5))\n \n if scale=='log':\n self.log_p.iloc[:,target].plot(color='r', label=symbol, **params)\n self.log_p_model.iloc[:,target].plot(color='k', label=str(symbol)+'(model)', **params)\n \n else:\n self.p.iloc[:,target].plot(color='r', label=symbol, **params)\n np.exp(self.log_p_model).iloc[:,target].plot(color='k', label=str(symbol)+'(model)', **params)\n \n \n def dislocation(self, method=None):\n if (method==None) or (method=='return'):\n return self.log_p - self.log_p_model\n \n elif method=='z':\n diff = self.dislocation()\n std = np.sqrt((diff**2).sum(axis=0) / len(diff))\n return diff / std\n \n elif method=='pct_rank':\n diff = self.dislocation()\n return diff.rank(axis=0, pct=True)\n \n elif method=='direction':\n diff = self.dislocation()\n diff[diff>0] = 1\n diff[diff<0] = -1\n return diff\n \n \n def projection(self, n_proj, what='model', method='kalman'):\n if what=='model':\n return self.log_p_model.diff(n_proj)\n \n elif what=='market':\n return self.log_p.diff(n_proj)"
] |
[
[
"numpy.log",
"pandas.DataFrame",
"sklearn.preprocessing.StandardScaler",
"numpy.exp",
"sklearn.decomposition.PCA"
]
] |
ComicShrimp/scikit-image-filters
|
[
"973ef190bf4dc74c8c9a89ffe8092d94d51d4bbd"
] |
[
"src/niblack_sauvola_thresholding.py"
] |
[
"import matplotlib\nimport matplotlib.pyplot as plt\n\nfrom skimage.data import page\nfrom skimage.filters import threshold_otsu, threshold_niblack, threshold_sauvola\nfrom skimage import io\nfrom skimage.color import rgb2gray\n\n\nimage = io.imread(\"./images/book.jpg\")\nimage_in_greyscale = rgb2gray(image)\n\nmatplotlib.rcParams[\"font.size\"] = 9\n\n\nimage = image_in_greyscale\nbinary_global = image > threshold_otsu(image)\n\nwindow_size = 25\nthresh_niblack = threshold_niblack(image, window_size=window_size, k=0.8)\nthresh_sauvola = threshold_sauvola(image, window_size=window_size)\n\nbinary_niblack = image > thresh_niblack\nbinary_sauvola = image > thresh_sauvola\n\nplt.figure(figsize=(8, 7))\nplt.subplot(2, 2, 1)\nplt.imshow(image, cmap=plt.cm.gray)\nplt.title(\"Original\")\nplt.axis(\"off\")\n\nplt.subplot(2, 2, 2)\nplt.title(\"Global Threshold\")\nplt.imshow(binary_global, cmap=plt.cm.gray)\nplt.axis(\"off\")\n\nplt.subplot(2, 2, 3)\nplt.imshow(binary_niblack, cmap=plt.cm.gray)\nplt.title(\"Niblack Threshold\")\nplt.axis(\"off\")\n\nplt.subplot(2, 2, 4)\nplt.imshow(binary_sauvola, cmap=plt.cm.gray)\nplt.title(\"Sauvola Threshold\")\nplt.axis(\"off\")\n\nplt.show()"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
nkoep/matplotlib
|
[
"6ed04252994443a4cecf95f0da0efedb6d514b38",
"6ed04252994443a4cecf95f0da0efedb6d514b38",
"6ed04252994443a4cecf95f0da0efedb6d514b38",
"c9898ea9a30c67c579ab27cd61b68e2abae0fb0e"
] |
[
"examples/pylab_examples/line_collection.py",
"examples/pylab_examples/data_helper.py",
"examples/pylab_examples/gradient_bar.py",
"examples/specialty_plots/topographic_hillshading.py"
] |
[
"import matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import colorConverter\n\nimport numpy as np\n\n# In order to efficiently plot many lines in a single set of axes,\n# Matplotlib has the ability to add the lines all at once. Here is a\n# simple example showing how it is done.\n\nx = np.arange(100)\n# Here are many sets of y to plot vs x\nys = x[:50, np.newaxis] + x[np.newaxis, :]\n\nsegs = np.zeros((50, 100, 2), float)\nsegs[:,:,1] = ys\nsegs[:,:,0] = x\n\n# Mask some values to test masked array support:\nsegs = np.ma.masked_where((segs > 50) & (segs < 60), segs)\n\n# We need to set the plot limits.\nax = plt.axes()\nax.set_xlim(x.min(), x.max())\nax.set_ylim(ys.min(), ys.max())\n\n# colors is sequence of rgba tuples\n# linestyle is a string or dash tuple. Legal string values are\n# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)\n# where onoffseq is an even length tuple of on and off ink in points.\n# If linestyle is omitted, 'solid' is used\n# See matplotlib.collections.LineCollection for more information\nline_segments = LineCollection(segs,\n linewidths = (0.5,1,1.5,2),\n colors = [colorConverter.to_rgba(i) \\\n for i in ('b','g','r','c','m','y','k')],\n linestyle = 'solid')\nax.add_collection(line_segments)\nax.set_title('Line collection with masked arrays')\nplt.show()\n",
"#!/usr/bin/env python\n# Some functions to load a return data for the plot demos\n\nfrom numpy import fromstring, argsort, take, array, resize\nimport matplotlib.cbook as cbook\n\n\ndef get_two_stock_data():\n \"\"\"\n load stock time and price data for two stocks The return values\n (d1,p1,d2,p2) are the trade time (in days) and prices for stocks 1\n and 2 (intc and aapl)\n \"\"\"\n ticker1, ticker2 = 'INTC', 'AAPL'\n\n file1 = cbook.get_sample_data('INTC.dat.gz')\n file2 = cbook.get_sample_data('AAPL.dat.gz')\n M1 = fromstring( file1.read(), '<d')\n\n M1 = resize(M1, (M1.shape[0]/2,2) )\n\n M2 = fromstring( file2.read(), '<d')\n M2 = resize(M2, (M2.shape[0]/2,2) )\n\n d1, p1 = M1[:,0], M1[:,1]\n d2, p2 = M2[:,0], M2[:,1]\n return (d1,p1,d2,p2)\n\n\ndef get_daily_data():\n \"\"\"\n return stock1 and stock2 instances, each of which have attributes\n\n open, high, low, close, volume\n\n as numeric arrays\n\n \"\"\"\n class C: pass\n def get_ticker(ticker):\n vals = []\n\n datafile = cbook.get_sample_data('%s.csv'%ticker, asfileobj=False)\n\n lines = open(datafile).readlines()\n for line in lines[1:]:\n vals.append([float(val) for val in line.split(',')[1:]])\n\n M = array(vals)\n c = C()\n c.open = M[:,0]\n c.high = M[:,1]\n c.low = M[:,2]\n c.close = M[:,3]\n c.volume = M[:,4]\n return c\n c1 = get_ticker('intc')\n c2 = get_ticker('msft')\n return c1, c2\n",
"from matplotlib.pyplot import figure, show, cm\nfrom numpy import arange\nfrom numpy.random import rand\n\n\ndef gbar(ax, x, y, width=0.5, bottom=0):\n X = [[.6, .6],[.7,.7]]\n for left,top in zip(x, y):\n right = left+width\n ax.imshow(X, interpolation='bicubic', cmap=cm.Blues,\n extent=(left, right, bottom, top), alpha=1)\n\nfig = figure()\n\nxmin, xmax = xlim = 0,10\nymin, ymax = ylim = 0,1\nax = fig.add_subplot(111, xlim=xlim, ylim=ylim,\n autoscale_on=False)\nX = [[.6, .6],[.7,.7]]\n\nax.imshow(X, interpolation='bicubic', cmap=cm.copper,\n extent=(xmin, xmax, ymin, ymax), alpha=1)\n\nN = 10\nx = arange(N)+0.25\ny = rand(N)\ngbar(ax, x, y, width=0.7)\nax.set_aspect('auto')\nshow()\n",
"\"\"\"\nDemonstrates the visual effect of varying blend mode and vertical exaggeration\non \"hillshaded\" plots.\n\nNote that the \"overlay\" and \"soft\" blend modes work well for complex surfaces\nsuch as this example, while the default \"hsv\" blend mode works best for smooth\nsurfaces such as many mathematical functions.\n\nIn most cases, hillshading is used purely for visual purposes, and *dx*/*dy*\ncan be safely ignored. In that case, you can tweak *vert_exag* (vertical\nexaggeration) by trial and error to give the desired visual effect. However,\nthis example demonstrates how to use the *dx* and *dy* kwargs to ensure that\nthe *vert_exag* parameter is the true vertical exaggeration.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.cbook import get_sample_data\nfrom matplotlib.colors import LightSource\n\ndem = np.load(get_sample_data('jacksboro_fault_dem.npz'))\nz = dem['elevation']\n\n#-- Optional dx and dy for accurate vertical exaggeration --------------------\n# If you need topographically accurate vertical exaggeration, or you don't want\n# to guess at what *vert_exag* should be, you'll need to specify the cellsize\n# of the grid (i.e. the *dx* and *dy* parameters). Otherwise, any *vert_exag*\n# value you specify will be realitive to the grid spacing of your input data\n# (in other words, *dx* and *dy* default to 1.0, and *vert_exag* is calculated\n# relative to those parameters). Similarly, *dx* and *dy* are assumed to be in\n# the same units as your input z-values. Therefore, we'll need to convert the\n# given dx and dy from decimal degrees to meters.\ndx, dy = dem['dx'], dem['dy']\ndy = 111200 * dy\ndx = 111200 * dx * np.cos(np.radians(dem['ymin']))\n#-----------------------------------------------------------------------------\n\n# Shade from the northwest, with the sun 45 degrees from horizontal\nls = LightSource(azdeg=315, altdeg=45)\ncmap = plt.cm.gist_earth\n\nfig, axes = plt.subplots(nrows=4, ncols=3, figsize=(8, 9))\nplt.setp(axes.flat, xticks=[], yticks=[])\n\n# Vary vertical exaggeration and blend mode and plot all combinations\nfor col, ve in zip(axes.T, [0.1, 1, 10]):\n # Show the hillshade intensity image in the first row\n col[0].imshow(ls.hillshade(z, vert_exag=ve, dx=dx, dy=dy), cmap='gray')\n\n # Place hillshaded plots with different blend modes in the rest of the rows\n for ax, mode in zip(col[1:], ['hsv', 'overlay', 'soft']):\n rgb = ls.shade(z, cmap=cmap, blend_mode=mode,\n vert_exag=ve, dx=dx, dy=dy)\n ax.imshow(rgb)\n\n# Label rows and columns\nfor ax, ve in zip(axes[0], [0.1, 1, 10]):\n ax.set_title('{}'.format(ve), size=18)\nfor ax, mode in zip(axes[:,0], ['Hillshade', 'hsv', 'overlay', 'soft']):\n ax.set_ylabel(mode, size=18)\n\n# Group labels...\naxes[0,1].annotate('Vertical Exaggeration', (0.5, 1), xytext=(0, 30),\n textcoords='offset points', xycoords='axes fraction',\n ha='center', va='bottom', size=20)\naxes[2,0].annotate('Blend Mode', (0, 0.5), xytext=(-30, 0),\n textcoords='offset points', xycoords='axes fraction',\n ha='right', va='center', size=20, rotation=90)\nfig.subplots_adjust(bottom=0.05, right=0.95)\n\nplt.show()\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.axes",
"matplotlib.colors.colorConverter.to_rgba",
"numpy.ma.masked_where",
"numpy.zeros",
"matplotlib.pyplot.show"
],
[
"matplotlib.cbook.get_sample_data",
"numpy.array",
"numpy.resize"
],
[
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.random.rand",
"matplotlib.pyplot.figure"
],
[
"numpy.radians",
"matplotlib.pyplot.subplots",
"matplotlib.colors.LightSource",
"matplotlib.pyplot.setp",
"matplotlib.cbook.get_sample_data",
"matplotlib.pyplot.show"
]
] |
flowersteam/mlagents-environments
|
[
"140415b209460d5c8c2e57dc91f874c24de6d6cb"
] |
[
"ml-agents/mlagents/envs/brain.py"
] |
[
"import logging\nimport numpy as np\nimport io\nimport orjson\n\nfrom typing import Dict\nfrom PIL import Image\n\nlogger = logging.getLogger(\"mlagents.envs\")\n\n\nclass BrainInfo:\n def __init__(self, visual_observation, vector_observation, text_observations, info_dict, memory=None, reward=None, agents=None, local_done=None, vector_action=None, text_action=None, max_reached=None, action_mask=None):\n \"\"\"\n Describes experience at current step of all agents linked to a brain.\n \"\"\"\n self.visual_observations = visual_observation\n self.vector_observations = vector_observation\n self.text_observations = text_observations\n self.info_dict = info_dict\n self.memories = memory\n self.rewards = reward\n self.local_done = local_done\n self.max_reached = max_reached\n self.agents = agents\n self.previous_vector_actions = vector_action\n self.previous_text_actions = text_action\n self.action_masks = action_mask\n\n @staticmethod\n def process_pixels(image_bytes, gray_scale):\n \"\"\"\n Converts byte array observation image into numpy array, re-sizes it,\n and optionally converts it to grey scale\n :param gray_scale: Whether to convert the image to grayscale.\n :param image_bytes: input byte array corresponding to image\n :return: processed numpy array of observation from environment\n \"\"\"\n s = bytearray(image_bytes)\n image = Image.open(io.BytesIO(s))\n s = np.array(image) / 255.0\n if gray_scale:\n s = np.mean(s, axis=2)\n s = np.reshape(s, [s.shape[0], s.shape[1], 1])\n return s\n\n @staticmethod\n def from_agent_proto(agent_info_list, brain_params):\n \"\"\"\n Converts list of agent infos to BrainInfo.\n \"\"\"\n vis_obs = []\n for i in range(brain_params.number_visual_observations):\n obs = [BrainInfo.process_pixels(x.visual_observations[i],\n brain_params.camera_resolutions[i]['blackAndWhite'])\n for x in agent_info_list]\n vis_obs += [np.array(obs)]\n if len(agent_info_list) == 0:\n memory_size = 0\n else:\n memory_size = max([len(x.memories) for x in agent_info_list])\n if memory_size == 0:\n memory = np.zeros((0, 0))\n else:\n [x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]\n memory = np.array([x.memories for x in agent_info_list])\n total_num_actions = sum(brain_params.vector_action_space_size)\n mask_actions = np.ones((len(agent_info_list), total_num_actions))\n for agent_index, agent_info in enumerate(agent_info_list):\n if agent_info.action_mask is not None:\n if len(agent_info.action_mask) == total_num_actions:\n mask_actions[agent_index, :] = [\n 0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]\n if any([np.isnan(x.reward) for x in agent_info_list]):\n logger.warning(\"An agent had a NaN reward for brain \" + brain_params.brain_name)\n if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):\n logger.warning(\"An agent had a NaN observation for brain \" + brain_params.brain_name)\n brain_info = BrainInfo(\n visual_observation=vis_obs,\n vector_observation=np.nan_to_num(\n np.array([x.stacked_vector_observation for x in agent_info_list])),\n text_observations=[x.text_observation for x in agent_info_list],\n info_dict = [orjson.loads(x.info_dict) for x in agent_info_list],\n memory=memory,\n reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],\n agents=[x.id for x in agent_info_list],\n local_done=[x.done for x in agent_info_list],\n vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),\n text_action=[x.stored_text_actions for x in 
agent_info_list],\n max_reached=[x.max_step_reached for x in agent_info_list],\n action_mask=mask_actions\n )\n print([x.info_dict for x in agent_info_list])\n return brain_info\n\n\n# Renaming of dictionary of brain name to BrainInfo for clarity\nAllBrainInfo = Dict[str, BrainInfo]\n\n\nclass BrainParameters:\n def __init__(self, brain_name, vector_observation_space_size, num_stacked_vector_observations,\n camera_resolutions, vector_action_space_size,\n vector_action_descriptions, vector_action_space_type):\n \"\"\"\n Contains all brain-specific parameters.\n \"\"\"\n self.brain_name = brain_name\n self.vector_observation_space_size = vector_observation_space_size\n self.num_stacked_vector_observations = num_stacked_vector_observations\n self.number_visual_observations = len(camera_resolutions)\n self.camera_resolutions = camera_resolutions\n self.vector_action_space_size = vector_action_space_size\n self.vector_action_descriptions = vector_action_descriptions\n self.vector_action_space_type = [\"discrete\", \"continuous\"][vector_action_space_type]\n\n def __str__(self):\n return '''Unity brain name: {}\n Number of Visual Observations (per agent): {}\n Vector Observation space size (per agent): {}\n Number of stacked Vector Observation: {}\n Vector Action space type: {}\n Vector Action space size (per agent): {}\n Vector Action descriptions: {}'''.format(self.brain_name,\n str(self.number_visual_observations),\n str(self.vector_observation_space_size),\n str(self.num_stacked_vector_observations),\n self.vector_action_space_type,\n str(self.vector_action_space_size),\n ', '.join(self.vector_action_descriptions))\n\n @staticmethod\n def from_proto(brain_param_proto):\n \"\"\"\n Converts brain parameter proto to BrainParameter object.\n :param brain_param_proto: protobuf object.\n :return: BrainParameter object.\n \"\"\"\n resolution = [{\n \"height\": x.height,\n \"width\": x.width,\n \"blackAndWhite\": x.gray_scale\n } for x in brain_param_proto.camera_resolutions]\n brain_params = BrainParameters(brain_param_proto.brain_name,\n brain_param_proto.vector_observation_size,\n brain_param_proto.num_stacked_vector_observations,\n resolution,\n brain_param_proto.vector_action_size,\n brain_param_proto.vector_action_descriptions,\n brain_param_proto.vector_action_space_type)\n return brain_params\n"
] |
[
[
"numpy.reshape",
"numpy.isnan",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] |
ArnaudHemmerle/FluoJupyter
|
[
"ed348c04604d828b9e40a86d4427d9fc2bcc4534"
] |
[
"lib/extraction/XRF.py"
] |
[
"'''\nModule for extracting, plotting, and saving an XRF scan.\n'''\nimport os\nimport sys\nimport io\nfrom contextlib import redirect_stdout\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nfrom lib.extraction import PyNexus as PN\n\n# Define colors for prints\n_RED='\\x1b[31;01m'\n_RESET=\"\\x1b[0m\"\n_BLUE='\\x1b[34;01m'\n\ndef Treat(nxs_filename, recording_dir, list_elems,\n absorbers='', logz=True, first_channel=0, last_channel=2048,\n use_eV=False, gain=10., eV0=0., arr_peaks=None, working_dir='', fast=True,\n show_data_stamps=False, plot_spectrogram=False, plot_sum=False, plot_first_last=False,\n save=False, verbose=False):\n '''\n Call functions for extracting, plotting, and saving an XRF scan.\n\n Parameters\n ----------\n nxs_filename : str\n Nexus filename.\n recording_dir : str\n Directory where the nexus file is stored.\n list_elems : ndarray\n An array with the elements to extract, for ex. list_elems = [1, 2, 3].\n absorbers : str, optional\n Text to display indicating which absorber was used.\n logz : bool, optional\n Log scale on the plots.\n first_channel : int, optional\n The spectrums will be extracted between first_channel and last_channel.\n last_channel : int, optional\n The spectrums will be extracted between first_channel and last_channel.\n use_eV : bool, optional\n Convert the channels to eVs.\n gain : float, optional\n Channels are converted to eVs following eVs = gain*channel+eV0.\n ev0 : float, optional\n Channels are converted to eVs following eVs = gain*channel+eV0.\n arr_peaks : ndarray, optional\n An array with the peaks to display, for ex. arr_peaks = [('Elastic', '12000.'), ('Compton', '11670.')].\n working_dir : str, optional\n Directory where the treated files will be stored.\n fast : bool, optional\n Trigger fast extract of the nexus file.\n show_data_stamps : bool, optional\n Print the list of sensors from the nexus file.\n plot_spectrogram : bool, optional\n Plot the spectrogram.\n plot_sum : bool, optional\n Plot the sum of the spectrums over time.\n plot_first_last : bool, optional\n Plot the first and last spectrum.\n save : bool, optional\n Save the GIXD.\n verbose : bool, optional\n Verbose mode.\n\n Returns\n -------\n ndarray\n channels, an array containing the channels.\n ndarray\n eVs, an array containing the channels converted to eVs.\n ndarray\n spectrums, an array containing the spectrums.\n\n Raises\n ------\n SystemExit('Nexus not found')\n when Nexus file is not found.\n SystemExit('ICR not found')\n when no ICR is found (most likely because the wrong elements were given).\n '''\n\n # Deal with empty arr_peaks\n if arr_peaks is None:\n arr_peaks = [(None,None)]\n\n channels, eVs, spectrums, first_non_zero_spectrum, last_non_zero_spectrum = \\\n Extract(nxs_filename, recording_dir,\n list_elems, first_channel, last_channel,\n gain, eV0, fast, show_data_stamps, verbose)\n\n if plot_spectrogram or plot_first_last or plot_sum:\n Plot(channels, eVs, spectrums, first_non_zero_spectrum, last_non_zero_spectrum,\n use_eV, arr_peaks, absorbers, logz,\n nxs_filename, plot_spectrogram, plot_sum, plot_first_last)\n\n if save:\n Save(nxs_filename, recording_dir, fast, working_dir, verbose)\n\n return channels, eVs, spectrums\n\ndef Extract(nxs_filename, recording_dir,\n list_elems, first_channel, last_channel,\n gain, eV0, fast, show_data_stamps, verbose):\n '''\n Extract the nexus scan and return useful quantities for XRF.\n\n Parameters\n ----------\n nxs_filename : str\n Nexus filename.\n recording_dir : str\n 
Directory where the nexus file is stored.\n list_elems : ndarray\n An array with the elements to extract, for ex. list_elems = [1, 2, 3].\n first_channel : int, optional\n The spectrums will be extracted between first_channel and last_channel.\n last_channel : int\n The spectrums will be extracted between first_channel and last_channel.\n gain : float\n Channels are converted to eVs following eVs = gain*channel+eV0.\n ev0 : float\n Channels are converted to eVs following eVs = gain*channel+eV0.\n fast : bool, optional\n Trigger fast extract of the nexus file.\n show_data_stamps : bool, optional\n Print the list of sensors from the nexus file.\n verbose : bool, optional\n Verbose mode.\n\n Returns\n -------\n ndarray\n channels, an array containing the channels.\n ndarray\n eVs, an array containing the channels converted to eVs.\n ndarray\n spectrums, an array containing the spectrums.\n int\n first_non_zero_spectrum, index of the first scan extracted.\n int\n last_non_zero_spectrum, index of the last scan extracted.\n\n Raises\n ------\n SystemExit('Nexus not found')\n when Nexus file is not found.\n SystemExit('ICR not found')\n when no ICR is found (most likely because the wrong elements were given).\n '''\n nxs_path = recording_dir+nxs_filename\n\n if not os.path.isfile(nxs_path):\n print(_RED+'Scan %s seems not to exist in recording directory'%(nxs_filename)+_RESET)\n print(('\\t\\t recording directory : '+recording_dir))\n sys.exit('Nexus not found')\n\n else:\n\n if verbose:\n print(_BLUE+\" - Open Nexus Data File :\"+ _RESET)\n if verbose:\n print('\\t'+nxs_path)\n try:\n nexus=PN.PyNexusFile(nxs_path, fast=fast)\n except OSError:\n print(_RED,'\\t Nexus file seems not to exist or is not correct',_RESET)\n sys.exit('Nexus not found')\n\n nbpts=np.int(nexus.get_nbpts())\n if verbose:\n print(\"\\t. Number of data points: \", nbpts)\n\n # Get stamps\n stamps, data= nexus.extractData()\n\n nexus.close()\n\n if show_data_stamps :\n print(\"\\t. Available Counters:\")\n for i, stamp in enumerate(stamps):\n if stamp[1] is not None:\n if show_data_stamps:\n print(\"\\t\\t\", i, ' -------> ', stamp[1])\n else:\n if show_data_stamps:\n print(\"\\t\\t\",i, ' -------> ', stamp[0])\n\n\n def extract_and_correct(ind_spectrum):\n '''\n Extract the requested fluospectrum from the nexus file and correct it with ICR/OCR\n '''\n is_icr_found = False\n is_ocr_found = False\n for i, stamp in enumerate(stamps):\n if (stamp[1] is not None and stamp[1].lower() == \"fluoicr0\"+ind_spectrum):\n fluoicr = data[i]\n is_icr_found = True\n if (stamp[1] is not None and stamp[1].lower() == \"fluoocr0\"+ind_spectrum):\n fluoocr = data[i]\n is_ocr_found = True\n if (stamp[1] is not None and stamp[1].lower() == \"fluospectrum0\"+ind_spectrum):\n fluospectrum = data[i]\n if (stamp[1] is None and stamp[0].lower() == \"integration_time\"):\n integration_time = data[i]\n\n if is_icr_found:\n ICR = fluoicr\n if is_ocr_found:\n OCR = fluoocr\n else:\n print(_RED+\"OCR not found in data. Taking OCR = spectrum_intensity/counting_time.\"+_RESET)\n OCR = np.array([np.sum(fluospectrum[n])/integration_time[n] for n in range(len(fluospectrum))])\n\n ratio = np.array([ICR[n]/OCR[n] if (~np.isclose(OCR[n],0.) & ~np.isnan(OCR[n]) & ~np.isnan(ICR[n]))\n else 0. for n in range(len(ICR))])\n spectrums_corr = np.array([fluospectrum[n]*ratio[n] for n in range(len(ratio))])\n return spectrums_corr\n\n print(_RED+\"ICR not found in data. 
Check if the box \\'Elements\\' is right.\"+_RESET)\n print(_RED+\"Try to put 4 in the box \\'Elements\\' for the single-element detector.\"+_RESET)\n print(_RED+\"Try to put 0, 1, 2, 3 in the box \\'Elements\\' for the four-elements detector.\"+_RESET)\n sys.exit('ICR not found.')\n\n # Correct each chosen element with ICR/OCR and sum them\n allspectrums_corr = np.zeros((nbpts, 2048))\n\n for i in list_elems:\n allspectrums_corr += extract_and_correct(str(i))\n\n ind_non_zero_spectrums = np.where(np.sum(allspectrums_corr, axis = 1)>10.)[0]\n first_non_zero_spectrum = ind_non_zero_spectrums[0]\n last_non_zero_spectrum = ind_non_zero_spectrums[-1]\n\n channels = np.arange(int(first_channel), int(last_channel+1))\n eVs = channels*gain+eV0\n spectrums = allspectrums_corr[0:last_non_zero_spectrum+1,\n int(first_channel):int(last_channel+1)]\n\n return channels, eVs, spectrums, first_non_zero_spectrum, last_non_zero_spectrum\n\n\ndef Extract_mat(filename, recording_dir,\n list_elems, first_channel, last_channel,\n gain, eV0, show_data_stamps, verbose):\n '''\n Extract the spectrums from .mat files and return useful quantities extracted from the .dat file.\n\n Parameters\n ----------\n filename : str\n Filename without the extension.\n recording_dir : str\n Directory where the mat and dat files are stored.\n list_elems : ndarray\n An array with the elements to extract, for ex. list_elems = [1, 2, 3].\n first_channel : int, optional\n The spectrums will be extracted between first_channel and last_channel.\n last_channel : int\n The spectrums will be extracted between first_channel and last_channel.\n gain : float\n Channels are converted to eVs following eVs = gain*channel+eV0.\n ev0 : float\n Channels are converted to eVs following eVs = gain*channel+eV0.\n show_data_stamps : bool, optional\n Print the list of sensors from the dat file.\n verbose : bool, optional\n Verbose mode.\n\n Returns\n -------\n ndarray\n channels, an array containing the channels.\n ndarray\n eVs, an array containing the channels converted to eVs.\n ndarray\n spectrums, an array containing the spectrums.\n int\n first_non_zero_spectrum, index of the first scan extracted.\n int\n last_non_zero_spectrum, index of the last scan extracted.\n\n Raises\n ------\n SystemExit('.mat not found')\n when mat file is not found.\n SystemExit('.dat not found')\n when dat file is not found.\n SystemExit('ICR not found')\n when no ICR is found (most likely because the wrong elements were given).\n '''\n file_path = recording_dir+filename\n path_to_dat = file_path+'.dat'\n\n if not os.path.isfile(path_to_dat):\n print(_RED+'Scan %s seems not to exist in recording directory'%(filename+'.dat')+_RESET)\n print(('\\t\\t recording directory : '+recording_dir))\n sys.exit('.dat not found')\n\n else:\n \n # Extract list of detector elements available\n SDD_elems_available = []\n nbpts = 0\n for index_element in list_elems:\n path_to_mat = file_path+'_fluospectrum0'+str(index_element)+'.mat'\n\n if os.path.isfile(path_to_mat):\n SDD_elems_available.append(str(index_element))\n\n # Extract number of spectrums taken during the scan\n nbpts = np.shape(np.genfromtxt(path_to_mat))[0]\n \n \n if SDD_elems_available == []:\n print(_RED,'\\t No .mat file found.',_RESET)\n sys.exit('.mat not found')\n\n if verbose:\n print(\"\\t. 
Number of data points: \", nbpts)\n\n # Get stamps and data\n dat_extracted = np.genfromtxt(path_to_dat, names=True)\n stamps = [(name, name, None) for name in dat_extracted.dtype.names]\n data = np.genfromtxt(path_to_dat, skip_header=1).transpose()\n \n if show_data_stamps :\n print(\"\\t. Available Counters:\")\n for i, stamp in enumerate(stamps):\n if stamp[1] is not None:\n if show_data_stamps:\n print(\"\\t\\t\", i, ' -------> ', stamp[1])\n else:\n if show_data_stamps:\n print(\"\\t\\t\",i, ' -------> ', stamp[0])\n\n\n def extract_and_correct(ind_spectrum):\n '''\n Extract the requested fluospectrum from the mat file and correct it with ICR/OCR from the dat file\n '''\n is_icr_found = False\n is_ocr_found = False\n\n for i, stamp in enumerate(stamps):\n if (stamp[1] is not None and stamp[1].lower() == \"fluoicr0\"+ind_spectrum):\n fluoicr = data[i]\n is_icr_found = True\n if (stamp[1] is not None and stamp[1].lower() == \"fluoocr0\"+ind_spectrum):\n fluoocr = data[i]\n is_ocr_found = True\n if (stamp[1] is None and stamp[0].lower() == \"integration_time\"):\n integration_time = data[i]\n\n fluospectrum = np.genfromtxt(file_path+'_fluospectrum0'+str(ind_spectrum)+'.mat')\n \n if is_icr_found:\n ICR = fluoicr\n if is_ocr_found:\n OCR = fluoocr\n else:\n print(_RED+\"OCR not found in data. Taking OCR = spectrum_intensity/counting_time.\"+_RESET)\n OCR = np.array([np.sum(fluospectrum[n])/integration_time[n] for n in range(len(fluospectrum))])\n\n ratio = np.array([ICR[n]/OCR[n] if (~np.isclose(OCR[n],0.) & ~np.isnan(OCR[n]) & ~np.isnan(ICR[n]))\n else 0. for n in range(len(ICR))])\n spectrums_corr = np.array([fluospectrum[n]*ratio[n] for n in range(len(ratio))])\n return spectrums_corr\n\n print(_RED+\"ICR not found in data. Check if the box \\'Elements\\' is right.\"+_RESET)\n print(_RED+\"Try to put 4 in the box \\'Elements\\' for the single-element detector.\"+_RESET)\n print(_RED+\"Try to put 0, 1, 2, 3 in the box \\'Elements\\' for the four-elements detector.\"+_RESET)\n sys.exit('ICR not found.')\n\n # Correct each chosen element with ICR/OCR and sum them\n allspectrums_corr = np.zeros((nbpts, 2048))\n\n for i in list_elems:\n allspectrums_corr += extract_and_correct(str(i))\n\n ind_non_zero_spectrums = np.where(np.sum(allspectrums_corr, axis = 1)>10.)[0]\n first_non_zero_spectrum = ind_non_zero_spectrums[0]\n last_non_zero_spectrum = ind_non_zero_spectrums[-1]\n\n channels = np.arange(int(first_channel), int(last_channel+1))\n eVs = channels*gain+eV0\n spectrums = allspectrums_corr[0:last_non_zero_spectrum+1,\n int(first_channel):int(last_channel+1)]\n\n return channels, eVs, spectrums, first_non_zero_spectrum, last_non_zero_spectrum\n\n\n\n\ndef Plot(channels, eVs, spectrums, first_non_zero_spectrum, last_non_zero_spectrum,\n use_eV, arr_peaks, absorbers, logz,\n nxs_filename, plot_spectrogram, plot_sum, plot_first_last):\n '''\n Plot XRF data.\n\n Parameters\n ----------\n channels : ndarray\n The channels.\n eVs : ndarray\n The channels converted to eVs.\n spectrums : ndarray\n The spectrums.\n first_non_zero_spectrum : int\n Index of the first scan extracted.\n last_non_zero_spectrum : int\n Index of the last scan extracted.\n use_eV : bool\n Convert the channels to eVs.\n arr_peaks : ndarray, optional\n An array with the peaks to display, for ex. 
arr_peaks = [('Elastic', '12000.'), ('Compton', '11670.')].\n absorbers : str, optional\n Text to display indicating which absorber was used.\n logz : bool\n Log scale on the plots.\n nxs_filename : str\n Nexus filename.\n plot_spectrogram : bool, optional\n Plot the spectrogram.\n plot_sum : bool, optional\n Plot the sum of the spectrums over time.\n plot_first_last : bool, optional\n Plot the first and last spectrum.\n '''\n # Print absorbers\n if absorbers != '':\n print(\"\\t. Absorbers:\", str(absorbers))\n\n if plot_spectrogram:\n\n fig = plt.figure(figsize=(12,4.6))\n ax1 = fig.add_subplot(111)\n ax1.set_title(nxs_filename.split('\\\\')[-1], fontsize='x-large')\n ax1.set_xlabel('spectrum index', fontsize='large')\n ax1.set_xlim(left = 0, right = last_non_zero_spectrum)\n\n if use_eV:\n xx, yy = np.meshgrid(np.arange(0,last_non_zero_spectrum+1), eVs)\n ax1.set_ylabel('eV', fontsize='large')\n else:\n xx, yy = np.meshgrid(np.arange(0,last_non_zero_spectrum+1), channels)\n ax1.set_ylabel('channel', fontsize='large')\n\n if logz:\n ax1.pcolormesh(xx, yy, spectrums.transpose(), cmap='viridis', shading = 'auto',\n norm = colors.LogNorm(), rasterized=True)\n else:\n ax1.pcolormesh(xx, yy, spectrums.transpose(), cmap='viridis', shading = 'auto',\n rasterized=True)\n\n plt.show()\n\n if plot_sum:\n fig = plt.figure(figsize=(12,4.5))\n ax1 = fig.add_subplot(111)\n ax1.set_ylabel('counts', fontsize='large')\n if logz:\n ax1.set_yscale('log')\n if use_eV:\n ax1.set_xlabel('eV', fontsize='large')\n line1, = ax1.plot(eVs, np.sum(spectrums, axis = 0), 'b.-', label='Sum of spectrums')\n else:\n ax1.set_xlabel('channel', fontsize='large')\n line1, = ax1.plot(channels, np.sum(spectrums, axis = 0), 'b.-', label='Sum of spectrums')\n\n if arr_peaks[0][0] is not None :\n\n # Plot the peak positions\n\n # Prepare a list of colors and linestyles\n colors_axv = iter(['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2',\n '#7f7f7f', '#bcbd22', '#17becf']*20)\n linestyles_axv = iter(['--', '-.', '-', ':']*40)\n\n # Rearrange the peaks to plot them by increasing energy\n arr_peaks = np.array(arr_peaks)\n arg_position_peaks = np.argsort([float(elem[1]) for elem in arr_peaks])\n val_position_peaks = arr_peaks[arg_position_peaks][:,1]\n labels_peaks = arr_peaks[arg_position_peaks][:,0]\n\n axvlines = []\n for i in range(len(arr_peaks)):\n axvlines.append(ax1.axvline(float(val_position_peaks[i]), label = str(labels_peaks[i]),\n color = next(colors_axv), linestyle = next(linestyles_axv)))\n\n axvlegends = ax1.legend(handles=axvlines, fontsize=10, ncol = len(arr_peaks)//16+1,\n bbox_to_anchor=(1.01, 1.), loc='upper left', borderaxespad=0.)\n plt.gca().add_artist(axvlegends)\n\n ax1.legend(handles=[line1], fontsize='large', loc='upper left')\n plt.show()\n\n if plot_first_last:\n #Plot the selected channel range\n fig = plt.figure(figsize=(12,4.5))\n ax1 = fig.add_subplot(111)\n ax1.set_ylabel('counts', fontsize='large')\n if logz:\n ax1.set_yscale('log')\n if use_eV:\n ax1.set_xlabel('eV', fontsize='large')\n line1, = ax1.plot(eVs, spectrums[first_non_zero_spectrum], 'b.-', label='First spectrum')\n line2, = ax1.plot(eVs, spectrums[-1], 'r.-', label='Last spectrum')\n else:\n ax1.set_xlabel('channel', fontsize='large')\n line1, = ax1.plot(channels, spectrums[first_non_zero_spectrum], 'b.-', label='First spectrum')\n line2, = ax1.plot(channels, spectrums[-1], 'r.-', label='Last spectrum')\n\n if arr_peaks[0][0] is not None :\n\n # Rearrange the peaks to plot them by increasing energy\n 
arr_peaks = np.array(arr_peaks)\n arg_position_peaks = np.argsort([float(elem[1]) for elem in arr_peaks])\n val_position_peaks = arr_peaks[arg_position_peaks][:,1]\n labels_peaks = arr_peaks[arg_position_peaks][:,0]\n\n axvlines = []\n for i in range(len(arr_peaks)):\n axvlines.append(ax1.axvline(float(val_position_peaks[i]), label = str(labels_peaks[i]),\n color = next(colors_axv), linestyle = next(linestyles_axv)))\n\n axvlegends = ax1.legend(handles=axvlines, fontsize=10, ncol = len(arr_peaks)//16+1,\n bbox_to_anchor=(1.01, 1.), loc='upper left', borderaxespad=0.)\n plt.gca().add_artist(axvlegends)\n\n ax1.legend(handles=[line1, line2], fontsize='large', loc='upper left')\n plt.show()\n\n\ndef Save(nxs_filename, recording_dir, fast, working_dir, verbose):\n '''\n Use the PyNexus library to convert the Nexus file into a .dat file.\n\n Parameters\n ----------\n nxs_filename : str\n Nexus filename.\n recording_dir : str\n Directory where the nexus file is stored.\n fast : bool\n Trigger fast extract of the nexus file.\n working_dir : str\n Directory where the treated files will be stored.\n verbose : bool\n Verbose mode.\n '''\n savename = working_dir + nxs_filename[:nxs_filename.rfind('.nxs')]\n\n # We assume extraction was already checked with Extract\n nxs_path = recording_dir+nxs_filename\n nexus=PN.PyNexusFile(nxs_path, fast=fast)\n\n # Get stamps and Data\n stamps, data = nexus.extractData()\n\n f = io.StringIO()\n # Avoid printing sensors in the notebook\n with redirect_stdout(f):\n old_nexus_filename = nexus.filename\n # Save in working dir\n nexus.filename = working_dir+nxs_filename\n nexus.savePointExtractedData((stamps, data))\n nexus.saveOneDExtractedData((stamps, data))\n nexus.filename = old_nexus_filename\n out = f.getvalue()\n\n nexus.close()\n\n if verbose:\n print('\\t. 0D data saved in:')\n print(\"\\t\", savename+'.dat')\n print('\\t. Spectrum(s) saved in:')\n print(\"\\t\", savename+'_fluospectrum*.mat')\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.colors.LogNorm",
"numpy.isnan",
"numpy.arange",
"numpy.isclose",
"numpy.genfromtxt",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
siraferradans/scikit-image-temp
|
[
"db3b97aec1824d8f49116b8918ff95639af441d4",
"db3b97aec1824d8f49116b8918ff95639af441d4"
] |
[
"skimage/color/colorconv.py",
"doc/examples/filters/plot_gabor_vs_morlet.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Functions for converting between color spaces.\n\nThe \"central\" color space in this module is RGB, more specifically the linear\nsRGB color space using D65 as a white-point [1]_. This represents a\nstandard monitor (w/o gamma correction). For a good FAQ on color spaces see\n[2]_.\n\nThe API consists of functions to convert to and from RGB as defined above, as\nwell as a generic function to convert to and from any supported color space\n(which is done through RGB in most cases).\n\n\nSupported color spaces\n----------------------\n* RGB : Red Green Blue.\n Here the sRGB standard [1]_.\n* HSV : Hue, Saturation, Value.\n Uniquely defined when related to sRGB [3]_.\n* RGB CIE : Red Green Blue.\n The original RGB CIE standard from 1931 [4]_. Primary colors are 700 nm\n (red), 546.1 nm (blue) and 435.8 nm (green).\n* XYZ CIE : XYZ\n Derived from the RGB CIE color space. Chosen such that\n ``x == y == z == 1/3`` at the whitepoint, and all color matching\n functions are greater than zero everywhere.\n* LAB CIE : Lightness, a, b\n Colorspace derived from XYZ CIE that is intended to be more\n perceptually uniform\n* LUV CIE : Lightness, u, v\n Colorspace derived from XYZ CIE that is intended to be more\n perceptually uniform\n* LCH CIE : Lightness, Chroma, Hue\n Defined in terms of LAB CIE. C and H are the polar representation of\n a and b. The polar angle C is defined to be on ``(0, 2*pi)``\n\n:author: Nicolas Pinto (rgb2hsv)\n:author: Ralf Gommers (hsv2rgb)\n:author: Travis Oliphant (XYZ and RGB CIE functions)\n:author: Matt Terry (lab2lch)\n:author: Alex Izvorski (yuv2rgb, rgb2yuv and related)\n\n:license: modified BSD\n\nReferences\n----------\n.. [1] Official specification of sRGB, IEC 61966-2-1:1999.\n.. [2] http://www.poynton.com/ColorFAQ.html\n.. [3] http://en.wikipedia.org/wiki/HSL_and_HSV\n.. [4] http://en.wikipedia.org/wiki/CIE_1931_color_space\n\"\"\"\n\nfrom __future__ import division\n\nfrom warnings import warn\nimport numpy as np\nfrom scipy import linalg\nfrom ..util import dtype, dtype_limits\n\n\ndef guess_spatial_dimensions(image):\n \"\"\"Make an educated guess about whether an image has a channels dimension.\n\n Parameters\n ----------\n image : ndarray\n The input image.\n\n Returns\n -------\n spatial_dims : int or None\n The number of spatial dimensions of `image`. If ambiguous, the value\n is ``None``.\n\n Raises\n ------\n ValueError\n If the image array has less than two or more than four dimensions.\n \"\"\"\n if image.ndim == 2:\n return 2\n if image.ndim == 3 and image.shape[-1] != 3:\n return 3\n if image.ndim == 3 and image.shape[-1] == 3:\n return None\n if image.ndim == 4 and image.shape[-1] == 3:\n return 3\n else:\n raise ValueError(\"Expected 2D, 3D, or 4D array, got %iD.\" % image.ndim)\n\n\ndef convert_colorspace(arr, fromspace, tospace):\n \"\"\"Convert an image array to a new color space.\n\n Parameters\n ----------\n arr : array_like\n The image to convert.\n fromspace : str\n The color space to convert from. Valid color space strings are\n ``['RGB', 'HSV', 'RGB CIE', 'XYZ']``. Value may also be specified as\n lower case.\n tospace : str\n The color space to convert to. Valid color space strings are\n ``['RGB', 'HSV', 'RGB CIE', 'XYZ']``. Value may also be specified as\n lower case.\n\n Returns\n -------\n newarr : ndarray\n The converted image.\n\n Notes\n -----\n Conversion occurs through the \"central\" RGB color space, i.e. 
conversion\n from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV`` instead of\n directly.\n\n Examples\n --------\n >>> from skimage import data\n >>> img = data.astronaut()\n >>> img_hsv = convert_colorspace(img, 'RGB', 'HSV')\n \"\"\"\n fromdict = {'RGB': lambda im: im, 'HSV': hsv2rgb, 'RGB CIE': rgbcie2rgb,\n 'XYZ': xyz2rgb, 'YUV': yuv2rgb, 'YIQ': yiq2rgb,\n 'YPbPr': ypbpr2rgb, 'YCbCr': ycbcr2rgb }\n todict = {'RGB': lambda im: im, 'HSV': rgb2hsv, 'RGB CIE': rgb2rgbcie,\n 'XYZ': rgb2xyz, 'YUV': rgb2yuv, 'YIQ': rgb2yiq,\n 'YPbPr': rgb2ypbpr, 'YCbCr': rgb2ycbcr }\n\n fromspace = fromspace.upper()\n tospace = tospace.upper()\n if fromspace not in fromdict.keys():\n raise ValueError('fromspace needs to be one of %s' % fromdict.keys())\n if tospace not in todict.keys():\n raise ValueError('tospace needs to be one of %s' % todict.keys())\n\n return todict[tospace](fromdict[fromspace](arr))\n\n\ndef _prepare_colorarray(arr):\n \"\"\"Check the shape of the array and convert it to\n floating point representation.\n\n \"\"\"\n arr = np.asanyarray(arr)\n\n if arr.ndim not in [3, 4] or arr.shape[-1] != 3:\n msg = (\"the input array must be have a shape == (.., ..,[ ..,] 3)), \" +\n \"got (\" + (\", \".join(map(str, arr.shape))) + \")\")\n raise ValueError(msg)\n\n return dtype.img_as_float(arr)\n\n\ndef rgb2hsv(rgb):\n \"\"\"RGB to HSV color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n The conversion assumes an input data range of [0, 1] for all\n color components.\n\n Conversion between RGB and HSV color spaces results in some loss of\n precision, due to integer arithmetic and rounding [1]_.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/HSL_and_HSV\n\n Examples\n --------\n >>> from skimage import color\n >>> from skimage import data\n >>> img = data.astronaut()\n >>> img_hsv = color.rgb2hsv(img)\n \"\"\"\n arr = _prepare_colorarray(rgb)\n out = np.empty_like(arr)\n\n # -- V channel\n out_v = arr.max(-1)\n\n # -- S channel\n delta = arr.ptp(-1)\n # Ignore warning for zero divided by zero\n old_settings = np.seterr(invalid='ignore')\n out_s = delta / out_v\n out_s[delta == 0.] = 0.\n\n # -- H channel\n # red is max\n idx = (arr[:, :, 0] == out_v)\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n\n # green is max\n idx = (arr[:, :, 1] == out_v)\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n\n # blue is max\n idx = (arr[:, :, 2] == out_v)\n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n out_h = (out[:, :, 0] / 6.) % 1.\n out_h[delta == 0.] 
= 0.\n\n np.seterr(**old_settings)\n\n # -- output\n out[:, :, 0] = out_h\n out[:, :, 1] = out_s\n out[:, :, 2] = out_v\n\n # remove NaN\n out[np.isnan(out)] = 0\n\n return out\n\n\ndef hsv2rgb(hsv):\n \"\"\"HSV to RGB color space conversion.\n\n Parameters\n ----------\n hsv : array_like\n The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `hsv` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n The conversion assumes an input data range of ``[0, 1]`` for all\n color components.\n\n Conversion between RGB and HSV color spaces results in some loss of\n precision, due to integer arithmetic and rounding [1]_.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/HSL_and_HSV\n\n Examples\n --------\n >>> from skimage import data\n >>> img = data.astronaut()\n >>> img_hsv = rgb2hsv(img)\n >>> img_rgb = hsv2rgb(img_hsv)\n \"\"\"\n arr = _prepare_colorarray(hsv)\n\n hi = np.floor(arr[:, :, 0] * 6)\n f = arr[:, :, 0] * 6 - hi\n p = arr[:, :, 2] * (1 - arr[:, :, 1])\n q = arr[:, :, 2] * (1 - f * arr[:, :, 1])\n t = arr[:, :, 2] * (1 - (1 - f) * arr[:, :, 1])\n v = arr[:, :, 2]\n\n hi = np.dstack([hi, hi, hi]).astype(np.uint8) % 6\n out = np.choose(hi, [np.dstack((v, t, p)),\n np.dstack((q, v, p)),\n np.dstack((p, v, t)),\n np.dstack((p, q, v)),\n np.dstack((t, p, v)),\n np.dstack((v, p, q))])\n\n return out\n\n\n# ---------------------------------------------------------------\n# Primaries for the coordinate systems\n# ---------------------------------------------------------------\ncie_primaries = np.array([700, 546.1, 435.8])\nsb_primaries = np.array([1. / 155, 1. / 190, 1. 
/ 225]) * 1e5\n\n# ---------------------------------------------------------------\n# Matrices that define conversion between different color spaces\n# ---------------------------------------------------------------\n\n# From sRGB specification\nxyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],\n [0.212671, 0.715160, 0.072169],\n [0.019334, 0.119193, 0.950227]])\n\nrgb_from_xyz = linalg.inv(xyz_from_rgb)\n\n# From http://en.wikipedia.org/wiki/CIE_1931_color_space\n# Note: Travis's code did not have the divide by 0.17697\nxyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],\n [0.17697, 0.81240, 0.01063],\n [0.00, 0.01, 0.99]]) / 0.17697\n\nrgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)\n\n# construct matrices to and from rgb:\nrgbcie_from_rgb = np.dot(rgbcie_from_xyz, xyz_from_rgb)\nrgb_from_rgbcie = np.dot(rgb_from_xyz, xyz_from_rgbcie)\n\n\ngray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],\n [0, 0, 0],\n [0, 0, 0]])\n\nyuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],\n [-0.14714119, -0.28886916, 0.43601035 ],\n [ 0.61497538, -0.51496512, -0.10001026 ]])\n\nrgb_from_yuv = linalg.inv(yuv_from_rgb)\n\nyiq_from_rgb = np.array([[0.299 , 0.587 , 0.114 ],\n [0.59590059, -0.27455667, -0.32134392],\n [0.21153661, -0.52273617, 0.31119955]])\n\nrgb_from_yiq = linalg.inv(yiq_from_rgb)\n\nypbpr_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],\n [-0.168736,-0.331264, 0.5 ],\n [ 0.5 ,-0.418688,-0.081312]])\n\nrgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)\n\nycbcr_from_rgb = np.array([[ 65.481, 128.553, 24.966],\n [ -37.797, -74.203, 112.0 ],\n [ 112.0 , -93.786, -18.214]])\n\nrgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)\n\n# CIE LAB constants for Observer=2A, Illuminant=D65\n# NOTE: this is actually the XYZ values for the illuminant above.\nlab_ref_white = np.array([0.95047, 1., 1.08883])\n\n# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I\n# we have:\n#\n# illuminant[I][0] corresponds to the XYZ coordinates for the 2 degree\n# field of view.\n#\n# illuminant[I][1] corresponds to the XYZ coordinates for the 10 degree\n# field of view.\n#\n# The XYZ coordinates are calculated from [1], using the formula:\n#\n# X = x * ( Y / y )\n# Y = Y\n# Z = ( 1 - x - y ) * ( Y / y )\n#\n# where Y = 1. The only exception is the illuminant \"D65\" with aperture angle\n# 2, whose coordinates are copied from 'lab_ref_white' for\n# backward-compatibility reasons.\n#\n# References\n# ----------\n# .. 
[1] http://en.wikipedia.org/wiki/Standard_illuminant\n\nilluminants = \\\n {\"A\": {'2': (1.098466069456375, 1, 0.3558228003436005),\n '10': (1.111420406956693, 1, 0.3519978321919493)},\n \"D50\": {'2': (0.9642119944211994, 1, 0.8251882845188288),\n '10': (0.9672062750333777, 1, 0.8142801513128616)},\n \"D55\": {'2': (0.956797052643698, 1, 0.9214805860173273),\n '10': (0.9579665682254781, 1, 0.9092525159847462)},\n \"D65\": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white`\n '10': (0.94809667673716, 1, 1.0730513595166162)},\n \"D75\": {'2': (0.9497220898840717, 1, 1.226393520724154),\n '10': (0.9441713925645873, 1, 1.2064272211720228)},\n \"E\": {'2': (1.0, 1.0, 1.0),\n '10': (1.0, 1.0, 1.0)}}\n\n\ndef get_xyz_coords(illuminant, observer):\n \"\"\"Get the XYZ coordinates of the given illuminant and observer [1]_.\n\n Parameters\n ----------\n illuminant : {\"A\", \"D50\", \"D55\", \"D65\", \"D75\", \"E\"}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {\"2\", \"10\"}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n (x, y, z) : tuple\n A tuple with 3 elements containing the XYZ coordinates of the given\n illuminant.\n\n Raises\n ------\n ValueError\n If either the illuminant or the observer angle are not supported or\n unknown.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Standard_illuminant\n\n \"\"\"\n illuminant = illuminant.upper()\n try:\n return illuminants[illuminant][observer]\n except KeyError:\n raise ValueError(\"Unknown illuminant/observer combination\\\n (\\'{0}\\', \\'{1}\\')\".format(illuminant, observer))\n\n# Haematoxylin-Eosin-DAB colorspace\n# From original Ruifrok's paper: A. C. Ruifrok and D. A. Johnston,\n# \"Quantification of histochemical staining by color deconvolution.,\"\n# Analytical and quantitative cytology and histology / the International\n# Academy of Cytology [and] American Society of Cytology, vol. 23, no. 4,\n# pp. 291-9, Aug. 
2001.\nrgb_from_hed = np.array([[0.65, 0.70, 0.29],\n [0.07, 0.99, 0.11],\n [0.27, 0.57, 0.78]])\nhed_from_rgb = linalg.inv(rgb_from_hed)\n\n# Following matrices are adapted form the Java code written by G.Landini.\n# The original code is available at:\n# http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html\n\n# Hematoxylin + DAB\nrgb_from_hdx = np.array([[0.650, 0.704, 0.286],\n [0.268, 0.570, 0.776],\n [0.0, 0.0, 0.0]])\nrgb_from_hdx[2, :] = np.cross(rgb_from_hdx[0, :], rgb_from_hdx[1, :])\nhdx_from_rgb = linalg.inv(rgb_from_hdx)\n\n# Feulgen + Light Green\nrgb_from_fgx = np.array([[0.46420921, 0.83008335, 0.30827187],\n [0.94705542, 0.25373821, 0.19650764],\n [0.0, 0.0, 0.0]])\nrgb_from_fgx[2, :] = np.cross(rgb_from_fgx[0, :], rgb_from_fgx[1, :])\nfgx_from_rgb = linalg.inv(rgb_from_fgx)\n\n# Giemsa: Methyl Blue + Eosin\nrgb_from_bex = np.array([[0.834750233, 0.513556283, 0.196330403],\n [0.092789, 0.954111, 0.283111],\n [0.0, 0.0, 0.0]])\nrgb_from_bex[2, :] = np.cross(rgb_from_bex[0, :], rgb_from_bex[1, :])\nbex_from_rgb = linalg.inv(rgb_from_bex)\n\n# FastRed + FastBlue + DAB\nrgb_from_rbd = np.array([[0.21393921, 0.85112669, 0.47794022],\n [0.74890292, 0.60624161, 0.26731082],\n [0.268, 0.570, 0.776]])\nrbd_from_rgb = linalg.inv(rgb_from_rbd)\n\n# Methyl Green + DAB\nrgb_from_gdx = np.array([[0.98003, 0.144316, 0.133146],\n [0.268, 0.570, 0.776],\n [0.0, 0.0, 0.0]])\nrgb_from_gdx[2, :] = np.cross(rgb_from_gdx[0, :], rgb_from_gdx[1, :])\ngdx_from_rgb = linalg.inv(rgb_from_gdx)\n\n# Hematoxylin + AEC\nrgb_from_hax = np.array([[0.650, 0.704, 0.286],\n [0.2743, 0.6796, 0.6803],\n [0.0, 0.0, 0.0]])\nrgb_from_hax[2, :] = np.cross(rgb_from_hax[0, :], rgb_from_hax[1, :])\nhax_from_rgb = linalg.inv(rgb_from_hax)\n\n# Blue matrix Anilline Blue + Red matrix Azocarmine + Orange matrix Orange-G\nrgb_from_bro = np.array([[0.853033, 0.508733, 0.112656],\n [0.09289875, 0.8662008, 0.49098468],\n [0.10732849, 0.36765403, 0.9237484]])\nbro_from_rgb = linalg.inv(rgb_from_bro)\n\n# Methyl Blue + Ponceau Fuchsin\nrgb_from_bpx = np.array([[0.7995107, 0.5913521, 0.10528667],\n [0.09997159, 0.73738605, 0.6680326],\n [0.0, 0.0, 0.0]])\nrgb_from_bpx[2, :] = np.cross(rgb_from_bpx[0, :], rgb_from_bpx[1, :])\nbpx_from_rgb = linalg.inv(rgb_from_bpx)\n\n# Alcian Blue + Hematoxylin\nrgb_from_ahx = np.array([[0.874622, 0.457711, 0.158256],\n [0.552556, 0.7544, 0.353744],\n [0.0, 0.0, 0.0]])\nrgb_from_ahx[2, :] = np.cross(rgb_from_ahx[0, :], rgb_from_ahx[1, :])\nahx_from_rgb = linalg.inv(rgb_from_ahx)\n\n# Hematoxylin + PAS\nrgb_from_hpx = np.array([[0.644211, 0.716556, 0.266844],\n [0.175411, 0.972178, 0.154589],\n [0.0, 0.0, 0.0]])\nrgb_from_hpx[2, :] = np.cross(rgb_from_hpx[0, :], rgb_from_hpx[1, :])\nhpx_from_rgb = linalg.inv(rgb_from_hpx)\n\n# -------------------------------------------------------------\n# The conversion functions that make use of the matrices above\n# -------------------------------------------------------------\n\n\ndef _convert(matrix, arr):\n \"\"\"Do the color space conversion.\n\n Parameters\n ----------\n matrix : array_like\n The 3x3 matrix to use.\n arr : array_like\n The input array.\n\n Returns\n -------\n out : ndarray, dtype=float\n The converted array.\n \"\"\"\n arr = _prepare_colorarray(arr)\n\n return np.dot(arr, matrix.T.copy())\n\n\ndef xyz2rgb(xyz):\n \"\"\"XYZ to RGB color space conversion.\n\n Parameters\n ----------\n xyz : array_like\n The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in 
RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `xyz` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n The CIE XYZ color space is derived from the CIE RGB color space. Note\n however that this function converts to sRGB.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2xyz, xyz2rgb\n >>> img = data.astronaut()\n >>> img_xyz = rgb2xyz(img)\n >>> img_rgb = xyz2rgb(img_xyz)\n \"\"\"\n # Follow the algorithm from http://www.easyrgb.com/index.php\n # except we don't multiply/divide by 100 in the conversion\n arr = _convert(rgb_from_xyz, xyz)\n mask = arr > 0.0031308\n arr[mask] = 1.055 * np.power(arr[mask], 1 / 2.4) - 0.055\n arr[~mask] *= 12.92\n arr[arr < 0] = 0\n arr[arr > 1] = 1\n return arr\n\n\ndef rgb2xyz(rgb):\n \"\"\"RGB to XYZ color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in XYZ format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.\n\n Notes\n -----\n The CIE XYZ color space is derived from the CIE RGB color space. Note\n however that this function converts from sRGB.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space\n\n Examples\n --------\n >>> from skimage import data\n >>> img = data.astronaut()\n >>> img_xyz = rgb2xyz(img)\n \"\"\"\n # Follow the algorithm from http://www.easyrgb.com/index.php\n # except we don't multiply/divide by 100 in the conversion\n arr = _prepare_colorarray(rgb).copy()\n mask = arr > 0.04045\n arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)\n arr[~mask] /= 12.92\n return _convert(xyz_from_rgb, arr)\n\n\ndef rgb2rgbcie(rgb):\n \"\"\"RGB to RGB CIE color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3-D array of shape ``(.., .., 3)``.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2rgbcie\n >>> img = data.astronaut()\n >>> img_rgbcie = rgb2rgbcie(img)\n \"\"\"\n return _convert(rgbcie_from_rgb, rgb)\n\n\ndef rgbcie2rgb(rgbcie):\n \"\"\"RGB CIE to RGB color space conversion.\n\n Parameters\n ----------\n rgbcie : array_like\n The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `rgbcie` is not a 3-D array of shape ``(.., .., 3)``.\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/CIE_1931_color_space\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2rgbcie, rgbcie2rgb\n >>> img = data.astronaut()\n >>> img_rgbcie = rgb2rgbcie(img)\n >>> img_rgb = rgbcie2rgb(img_rgbcie)\n \"\"\"\n return _convert(rgb_from_rgbcie, rgbcie)\n\n\ndef rgb2gray(rgb):\n \"\"\"Compute luminance of an RGB image.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``,\n or in RGBA format with shape ``(.., .., 4)``.\n\n Returns\n -------\n out : ndarray\n The luminance image, a 2-D array.\n\n Raises\n ------\n ValueError\n If `rgb2gray` is not a 3-D array of shape ``(.., .., 3)`` or\n ``(.., .., 4)``.\n\n References\n ----------\n .. [1] http://www.poynton.com/PDFs/ColorFAQ.pdf\n\n Notes\n -----\n The weights used in this conversion are calibrated for contemporary\n CRT phosphors::\n\n Y = 0.2125 R + 0.7154 G + 0.0721 B\n\n If there is an alpha channel present, it is ignored.\n\n Examples\n --------\n >>> from skimage.color import rgb2gray\n >>> from skimage import data\n >>> img = data.astronaut()\n >>> img_gray = rgb2gray(img)\n \"\"\"\n\n if rgb.ndim == 2:\n return np.ascontiguousarray(rgb)\n\n rgb = _prepare_colorarray(rgb[..., :3])\n\n gray = 0.2125 * rgb[..., 0]\n gray[:] += 0.7154 * rgb[..., 1]\n gray[:] += 0.0721 * rgb[..., 2]\n\n return gray\n\n\nrgb2grey = rgb2gray\n\n\ndef gray2rgb(image, alpha=None):\n \"\"\"Create an RGB representation of a gray-level image.\n\n Parameters\n ----------\n image : array_like\n Input image of shape ``(M, N [, P])``.\n alpha : bool, optional\n Ensure that the output image has an alpha layer. If None,\n alpha layers are passed through but not created.\n\n Returns\n -------\n rgb : ndarray\n RGB image of shape ``(M, N, [, P], 3)``.\n\n Raises\n ------\n ValueError\n If the input is not a 2- or 3-dimensional image.\n\n \"\"\"\n is_rgb = False\n is_alpha = False\n dims = np.squeeze(image).ndim\n\n if dims == 3:\n if image.shape[2] == 3:\n is_rgb = True\n elif image.shape[2] == 4:\n is_alpha = True\n is_rgb = True\n\n if is_rgb:\n if alpha == False:\n image = image[..., :3]\n\n elif alpha == True and not is_alpha:\n alpha_layer = (np.ones_like(image[..., 0, np.newaxis]) *\n dtype_limits(image)[1])\n image = np.concatenate((image, alpha_layer), axis=2)\n\n return image\n\n elif image.ndim != 1 and dims in (1, 2, 3):\n image = image[..., np.newaxis]\n\n if alpha:\n alpha_layer = (np.ones_like(image) * dtype_limits(image)[1])\n return np.concatenate(3 * (image,) + (alpha_layer,), axis=-1)\n else:\n return np.concatenate(3 * (image,), axis=-1)\n\n else:\n raise ValueError(\"Input image expected to be RGB, RGBA or gray.\")\n\ngrey2rgb = gray2rgb\n\ndef xyz2lab(xyz, illuminant=\"D65\", observer=\"2\"):\n \"\"\"XYZ to CIE-LAB color space conversion.\n\n Parameters\n ----------\n xyz : array_like\n The image in XYZ format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n illuminant : {\"A\", \"D50\", \"D55\", \"D65\", \"D75\", \"E\"}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {\"2\", \"10\"}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n out : ndarray\n The image in CIE-LAB format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n\n Raises\n ------\n ValueError\n If `xyz` is not a 3-D array of shape ``(.., ..,[ ..,] 3)``.\n ValueError\n If either the illuminant or the observer angle is unsupported or\n unknown.\n\n Notes\n -----\n By default 
Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values\n x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for\n a list of supported illuminants.\n\n References\n ----------\n .. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7\n .. [2] http://en.wikipedia.org/wiki/Lab_color_space\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2xyz, xyz2lab\n >>> img = data.astronaut()\n >>> img_xyz = rgb2xyz(img)\n >>> img_lab = xyz2lab(img_xyz)\n \"\"\"\n arr = _prepare_colorarray(xyz)\n\n xyz_ref_white = get_xyz_coords(illuminant, observer)\n\n # scale by CIE XYZ tristimulus values of the reference white point\n arr = arr / xyz_ref_white\n\n # Nonlinear distortion and linear transformation\n mask = arr > 0.008856\n arr[mask] = np.power(arr[mask], 1. / 3.)\n arr[~mask] = 7.787 * arr[~mask] + 16. / 116.\n\n x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]\n\n # Vector scaling\n L = (116. * y) - 16.\n a = 500.0 * (x - y)\n b = 200.0 * (y - z)\n\n return np.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)\n\n\ndef lab2xyz(lab, illuminant=\"D65\", observer=\"2\"):\n \"\"\"CIE-LAB to XYZcolor space conversion.\n\n Parameters\n ----------\n lab : array_like\n The image in lab format, in a 3-D array of shape ``(.., .., 3)``.\n illuminant : {\"A\", \"D50\", \"D55\", \"D65\", \"D75\", \"E\"}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {\"2\", \"10\"}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n out : ndarray\n The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `lab` is not a 3-D array of shape ``(.., .., 3)``.\n ValueError\n If either the illuminant or the observer angle are not supported or\n unknown.\n UserWarning\n If any of the pixels are invalid (Z < 0).\n\n\n Notes\n -----\n By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values x_ref\n = 95.047, y_ref = 100., z_ref = 108.883. See function 'get_xyz_coords' for\n a list of supported illuminants.\n\n References\n ----------\n .. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7\n .. [2] http://en.wikipedia.org/wiki/Lab_color_space\n\n \"\"\"\n\n arr = _prepare_colorarray(lab).copy()\n\n L, a, b = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]\n y = (L + 16.) / 116.\n x = (a / 500.) + y\n z = y - (b / 200.)\n\n if np.any(z < 0):\n invalid = np.nonzero(z < 0)\n warn('Color data out of range: Z < 0 in %s pixels' % invalid[0].size)\n z[invalid] = 0\n\n out = np.dstack([x, y, z])\n\n mask = out > 0.2068966\n out[mask] = np.power(out[mask], 3.)\n out[~mask] = (out[~mask] - 16.0 / 116.) 
/ 7.787\n\n # rescale to the reference white (illuminant)\n xyz_ref_white = get_xyz_coords(illuminant, observer)\n out *= xyz_ref_white\n return out\n\n\ndef rgb2lab(rgb):\n \"\"\"RGB to lab color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in Lab format, in a 3- or 4-D array of shape\n ``(.., ..,[ ..,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.\n\n Notes\n -----\n This function uses rgb2xyz and xyz2lab.\n \"\"\"\n return xyz2lab(rgb2xyz(rgb))\n\n\ndef lab2rgb(lab):\n \"\"\"Lab to RGB color space conversion.\n\n Parameters\n ----------\n lab : array_like\n The image in Lab format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `lab` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n This function uses lab2xyz and xyz2rgb.\n \"\"\"\n return xyz2rgb(lab2xyz(lab))\n\n\ndef xyz2luv(xyz, illuminant=\"D65\", observer=\"2\"):\n \"\"\"XYZ to CIE-Luv color space conversion.\n\n Parameters\n ----------\n xyz : (M, N, [P,] 3) array_like\n The 3 or 4 dimensional image in XYZ format. Final dimension denotes\n channels.\n illuminant : {\"A\", \"D50\", \"D55\", \"D65\", \"D75\", \"E\"}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {\"2\", \"10\"}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n out : (M, N, [P,] 3) ndarray\n The image in CIE-Luv format. Same dimensions as input.\n\n Raises\n ------\n ValueError\n If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.\n ValueError\n If either the illuminant or the observer angle are not supported or\n unknown.\n\n Notes\n -----\n By default XYZ conversion weights use observer=2A. Reference whitepoint\n for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,\n 108.883)``. See function 'get_xyz_coords' for a list of supported\n illuminants.\n\n References\n ----------\n .. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16\n .. [2] http://en.wikipedia.org/wiki/CIELUV\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2xyz, xyz2luv\n >>> img = data.astronaut()\n >>> img_xyz = rgb2xyz(img)\n >>> img_luv = xyz2luv(img_xyz)\n \"\"\"\n arr = _prepare_colorarray(xyz)\n\n # extract channels\n x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]\n\n eps = np.finfo(np.float).eps\n\n # compute y_r and L\n xyz_ref_white = get_xyz_coords(illuminant, observer)\n L = y / xyz_ref_white[1]\n mask = L > 0.008856\n L[mask] = 116. * np.power(L[mask], 1. / 3.) - 16.\n L[~mask] = 903.3 * L[~mask]\n\n u0 = 4 * xyz_ref_white[0] / np.dot([1, 15, 3], xyz_ref_white)\n v0 = 9 * xyz_ref_white[1] / np.dot([1, 15, 3], xyz_ref_white)\n\n # u' and v' helper functions\n def fu(X, Y, Z):\n return (4. * X) / (X + 15. * Y + 3. * Z + eps)\n\n def fv(X, Y, Z):\n return (9. * Y) / (X + 15. * Y + 3. * Z + eps)\n\n # compute u and v using helper functions\n u = 13. * L * (fu(x, y, z) - u0)\n v = 13. 
* L * (fv(x, y, z) - v0)\n\n return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1)\n\n\ndef luv2xyz(luv, illuminant=\"D65\", observer=\"2\"):\n \"\"\"CIE-Luv to XYZ color space conversion.\n\n Parameters\n ----------\n luv : (M, N, [P,] 3) array_like\n The 3 or 4 dimensional image in CIE-Luv format. Final dimension denotes\n channels.\n illuminant : {\"A\", \"D50\", \"D55\", \"D65\", \"D75\", \"E\"}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {\"2\", \"10\"}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n out : (M, N, [P,] 3) ndarray\n The image in XYZ format. Same dimensions as input.\n\n Raises\n ------\n ValueError\n If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.\n ValueError\n If either the illuminant or the observer angle are not supported or\n unknown.\n\n Notes\n -----\n XYZ conversion weights use observer=2A. Reference whitepoint for D65\n Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``. See\n function 'get_xyz_coords' for a list of supported illuminants.\n\n References\n ----------\n .. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16\n .. [2] http://en.wikipedia.org/wiki/CIELUV\n\n \"\"\"\n\n arr = _prepare_colorarray(luv).copy()\n\n L, u, v = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]\n\n eps = np.finfo(np.float).eps\n\n # compute y\n y = L.copy()\n mask = y > 7.999625\n y[mask] = np.power((y[mask] + 16.) / 116., 3.)\n y[~mask] = y[~mask] / 903.3\n xyz_ref_white = get_xyz_coords(illuminant, observer)\n y *= xyz_ref_white[1]\n\n # reference white x,z\n uv_weights = [1, 15, 3]\n u0 = 4 * xyz_ref_white[0] / np.dot(uv_weights, xyz_ref_white)\n v0 = 9 * xyz_ref_white[1] / np.dot(uv_weights, xyz_ref_white)\n\n # compute intermediate values\n a = u0 + u / (13. * L + eps)\n b = v0 + v / (13. * L + eps)\n c = 3 * y * (5 * b - 3)\n\n # compute x and z\n z = ((a - 4) * c - 15 * a * b * y) / (12 * b)\n x = -(c / b + 3. * z)\n\n return np.concatenate([q[..., np.newaxis] for q in [x, y, z]], axis=-1)\n\n\ndef rgb2luv(rgb):\n \"\"\"RGB to CIE-Luv color space conversion.\n\n Parameters\n ----------\n rgb : (M, N, [P,] 3) array_like\n The 3 or 4 dimensional image in RGB format. Final dimension denotes\n channels.\n\n Returns\n -------\n out : (M, N, [P,] 3) ndarray\n The image in CIE Luv format. Same dimensions as input.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.\n\n Notes\n -----\n This function uses rgb2xyz and xyz2luv.\n \"\"\"\n return xyz2luv(rgb2xyz(rgb))\n\n\ndef luv2rgb(luv):\n \"\"\"Luv to RGB color space conversion.\n\n Parameters\n ----------\n luv : (M, N, [P,] 3) array_like\n The 3 or 4 dimensional image in CIE Luv format. Final dimension denotes\n channels.\n\n Returns\n -------\n out : (M, N, [P,] 3) ndarray\n The image in RGB format. Same dimensions as input.\n\n Raises\n ------\n ValueError\n If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.\n\n Notes\n -----\n This function uses luv2xyz and xyz2rgb.\n \"\"\"\n return xyz2rgb(luv2xyz(luv))\n\n\ndef rgb2hed(rgb):\n \"\"\"RGB to Haematoxylin-Eosin-DAB (HED) color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in HED format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3-D array of shape ``(.., .., 3)``.\n\n\n References\n ----------\n .. [1] A. C. 
Ruifrok and D. A. Johnston, \"Quantification of histochemical\n staining by color deconvolution.,\" Analytical and quantitative\n cytology and histology / the International Academy of Cytology [and]\n American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2hed\n >>> ihc = data.immunohistochemistry()\n >>> ihc_hed = rgb2hed(ihc)\n \"\"\"\n return separate_stains(rgb, hed_from_rgb)\n\n\ndef hed2rgb(hed):\n \"\"\"Haematoxylin-Eosin-DAB (HED) to RGB color space conversion.\n\n Parameters\n ----------\n hed : array_like\n The image in the HED color space, in a 3-D array of shape\n ``(.., .., 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `hed` is not a 3-D array of shape ``(.., .., 3)``.\n\n References\n ----------\n .. [1] A. C. Ruifrok and D. A. Johnston, \"Quantification of histochemical\n staining by color deconvolution.,\" Analytical and quantitative\n cytology and histology / the International Academy of Cytology [and]\n American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2hed, hed2rgb\n >>> ihc = data.immunohistochemistry()\n >>> ihc_hed = rgb2hed(ihc)\n >>> ihc_rgb = hed2rgb(ihc_hed)\n \"\"\"\n return combine_stains(hed, rgb_from_hed)\n\n\ndef separate_stains(rgb, conv_matrix):\n \"\"\"RGB to stain color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n conv_matrix: ndarray\n The stain separation matrix as described by G. Landini [1]_.\n\n Returns\n -------\n out : ndarray\n The image in stain color space, in a 3-D array of shape\n ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n Stain separation matrices available in the ``color`` module and their\n respective colorspace:\n\n * ``hed_from_rgb``: Hematoxylin + Eosin + DAB\n * ``hdx_from_rgb``: Hematoxylin + DAB\n * ``fgx_from_rgb``: Feulgen + Light Green\n * ``bex_from_rgb``: Giemsa stain : Methyl Blue + Eosin\n * ``rbd_from_rgb``: FastRed + FastBlue + DAB\n * ``gdx_from_rgb``: Methyl Green + DAB\n * ``hax_from_rgb``: Hematoxylin + AEC\n * ``bro_from_rgb``: Blue matrix Anilline Blue + Red matrix Azocarmine\\\n + Orange matrix Orange-G\n * ``bpx_from_rgb``: Methyl Blue + Ponceau Fuchsin\n * ``ahx_from_rgb``: Alcian Blue + Hematoxylin\n * ``hpx_from_rgb``: Hematoxylin + PAS\n\n References\n ----------\n .. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import separate_stains, hdx_from_rgb\n >>> ihc = data.immunohistochemistry()\n >>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)\n \"\"\"\n rgb = dtype.img_as_float(rgb, force_copy=True)\n rgb += 2\n stains = np.dot(np.reshape(-np.log(rgb), (-1, 3)), conv_matrix)\n return np.reshape(stains, rgb.shape)\n\n\ndef combine_stains(stains, conv_matrix):\n \"\"\"Stain to RGB color space conversion.\n\n Parameters\n ----------\n stains : array_like\n The image in stain color space, in a 3-D array of shape\n ``(.., .., 3)``.\n conv_matrix: ndarray\n The stain separation matrix as described by G. 
Landini [1]_.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.\n\n Raises\n ------\n ValueError\n If `stains` is not a 3-D array of shape ``(.., .., 3)``.\n\n Notes\n -----\n Stain combination matrices available in the ``color`` module and their\n respective colorspace:\n\n * ``rgb_from_hed``: Hematoxylin + Eosin + DAB\n * ``rgb_from_hdx``: Hematoxylin + DAB\n * ``rgb_from_fgx``: Feulgen + Light Green\n * ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin\n * ``rgb_from_rbd``: FastRed + FastBlue + DAB\n * ``rgb_from_gdx``: Methyl Green + DAB\n * ``rgb_from_hax``: Hematoxylin + AEC\n * ``rgb_from_bro``: Blue matrix Anilline Blue + Red matrix Azocarmine\\\n + Orange matrix Orange-G\n * ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin\n * ``rgb_from_ahx``: Alcian Blue + Hematoxylin\n * ``rgb_from_hpx``: Hematoxylin + PAS\n\n References\n ----------\n .. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html\n\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import (separate_stains, combine_stains,\n ... hdx_from_rgb, rgb_from_hdx)\n >>> ihc = data.immunohistochemistry()\n >>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)\n >>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)\n \"\"\"\n from ..exposure import rescale_intensity\n\n stains = dtype.img_as_float(stains)\n logrgb2 = np.dot(-np.reshape(stains, (-1, 3)), conv_matrix)\n rgb2 = np.exp(logrgb2)\n return rescale_intensity(np.reshape(rgb2 - 2, stains.shape),\n in_range=(-1, 1))\n\n\ndef lab2lch(lab):\n \"\"\"CIE-LAB to CIE-LCH color space conversion.\n\n LCH is the cylindrical representation of the LAB (Cartesian) colorspace\n\n Parameters\n ----------\n lab : array_like\n The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must\n have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``\n color channels. Subsequent elements are copied.\n\n Returns\n -------\n out : ndarray\n The image in LCH format, in a N-D array with same shape as input `lab`.\n\n Raises\n ------\n ValueError\n If `lch` does not have at least 3 color channels (i.e. l, a, b).\n\n Notes\n -----\n The Hue is expressed as an angle between ``(0, 2*pi)``\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2lab, lab2lch\n >>> img = data.astronaut()\n >>> img_lab = rgb2lab(img)\n >>> img_lch = lab2lch(img_lab)\n \"\"\"\n lch = _prepare_lab_array(lab)\n\n a, b = lch[..., 1], lch[..., 2]\n lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)\n return lch\n\n\ndef _cart2polar_2pi(x, y):\n \"\"\"convert cartesian coordinates to polar (uses non-standard theta range!)\n\n NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than usual ``(-pi, +pi)``\n \"\"\"\n r, t = np.hypot(x, y), np.arctan2(y, x)\n t += np.where(t < 0., 2 * np.pi, 0)\n return r, t\n\n\ndef lch2lab(lch):\n \"\"\"CIE-LCH to CIE-LAB color space conversion.\n\n LCH is the cylindrical representation of the LAB (Cartesian) colorspace\n\n Parameters\n ----------\n lch : array_like\n The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must\n have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``\n color channels. Subsequent elements are copied.\n\n Returns\n -------\n out : ndarray\n The image in LAB format, with same shape as input `lch`.\n\n Raises\n ------\n ValueError\n If `lch` does not have at least 3 color channels (i.e. 
l, c, h).\n\n Examples\n --------\n >>> from skimage import data\n >>> from skimage.color import rgb2lab, lch2lab\n >>> img = data.astronaut()\n >>> img_lab = rgb2lab(img)\n >>> img_lch = lab2lch(img_lab)\n >>> img_lab2 = lch2lab(img_lch)\n \"\"\"\n lch = _prepare_lab_array(lch)\n\n c, h = lch[..., 1], lch[..., 2]\n lch[..., 1], lch[..., 2] = c * np.cos(h), c * np.sin(h)\n return lch\n\n\ndef _prepare_lab_array(arr):\n \"\"\"Ensure input for lab2lch, lch2lab are well-posed.\n\n Arrays must be in floating point and have at least 3 elements in\n last dimension. Return a new array.\n \"\"\"\n arr = np.asarray(arr)\n shape = arr.shape\n if shape[-1] < 3:\n raise ValueError('Input array has less than 3 color channels')\n return dtype.img_as_float(arr, force_copy=True)\n\n\ndef rgb2yuv(rgb):\n \"\"\"RGB to YUV color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in YUV format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n\n Notes\n -----\n Y is between 0 and 1. Use YCbCr instead of YUV for the color space which\n is commonly used by video codecs (where Y ranges from 16 to 235)\n \"\"\"\n return _convert(yuv_from_rgb, rgb)\n\n\ndef rgb2yiq(rgb):\n \"\"\"RGB to YIQ color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in YIQ format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n \"\"\"\n return _convert(yiq_from_rgb, rgb)\n\n\ndef rgb2ypbpr(rgb):\n \"\"\"RGB to YIQ color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in YIQ format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n \"\"\"\n return _convert(ypbpr_from_rgb, rgb)\n\n\ndef rgb2ycbcr(rgb):\n \"\"\"RGB to YCbCr color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in YCbCr format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n\n Notes\n -----\n Y is between 16 and 235. 
This is the color space which is commonly used\n by video codecs, it is sometimes incorrectly called \"YUV\"\n \"\"\"\n arr = _convert(ycbcr_from_rgb, rgb)\n arr[..., 0] += 16\n arr[..., 1] += 128\n arr[..., 2] += 128\n return arr\n\n\ndef yuv2rgb(yuv):\n \"\"\"RGB to YIQ color space conversion.\n\n Parameters\n ----------\n rgb : array_like\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in YIQ format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n \"\"\"\n return _convert(rgb_from_yuv, yuv)\n\n\ndef yiq2rgb(yiq):\n \"\"\"YIQ to RGB color space conversion.\n\n Parameters\n ----------\n yiq : array_like\n The image in YIQ format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `yiq` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n \"\"\"\n return _convert(rgb_from_yiq, yiq)\n\n\ndef ypbpr2rgb(ypbpr):\n \"\"\"YPbPr to RGB color space conversion.\n\n Parameters\n ----------\n ypbpr : array_like\n The image in YPbPr format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `ypbpr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n \"\"\"\n return _convert(rgb_from_ypbpr, ypbpr)\n\n\ndef ycbcr2rgb(ycbcr):\n \"\"\"YCbCr to RGB color space conversion.\n\n Parameters\n ----------\n ycbcr : array_like\n The image in YCbCr format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Returns\n -------\n out : ndarray\n The image in RGB format, in a 3- or 4-D array of shape\n ``(M, N, [P,] 3)``.\n\n Raises\n ------\n ValueError\n If `ycbcr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.\n\n Notes\n -----\n Y is between 16 and 235. This is the color space which is commonly used\n by video codecs, it is sometimes incorrectly called \"YUV\"\n \"\"\"\n arr = ycbcr.copy()\n arr[..., 0] -= 16\n arr[..., 1] -= 128\n arr[..., 2] -= 128\n return _convert(rgb_from_ycbcr, arr)\n",
"\"\"\"\n==========================================================\nApply a set of \"Gabor\" and \"Morlet\" filters to an picture\n==========================================================\n\nIn this example, we show the difference between filtering an image with the\nGabor filter and the Morlet filter.\n\n\nMorlet Filter\n---------------------\n\nZero sum version of the Gabor filter.\n\n\"\"\"\nimport numpy as np\nimport skimage\nfrom skimage.filters import gabor_kernel\nfrom skimage.filters import morlet_kernel\nimport matplotlib.pylab as plt\nfrom skimage import data\nfrom skimage.util import img_as_float\nfrom scipy import ndimage as ndi\n\nimage = img_as_float(data.load('brick.png'))\nimage = image[0:64,0:64]\n\n\nJ = 4\nL = 8\nxi_psi = 3. / 4 * np.pi\nsigma_xi = .8\nslant = 4. / L\n\n#show image\nplt.figure(figsize=(16, 8))\nplt.imshow(image)\nplt.title('Original image')\n\n# Generate a group of gabor filters and apply it to the brick image\n\nplt.figure(figsize=(16, 8))\nfor j, scale in enumerate(2 ** np.arange(J)):\n for l, theta in enumerate(np.arange(L) / float(L) * np.pi):\n sigma = sigma_xi * scale\n xi = xi_psi / scale\n\n sigma_x = sigma\n sigma_y = sigma / slant\n freq = xi / (np.pi * 2)\n\n gabor = gabor_kernel(freq, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y)\n\n im_filtered = np.abs(ndi.convolve(image, gabor, mode='wrap'))\n\n plt.subplot(J, L, j * L + l + 1)\n plt.imshow(np.real(im_filtered), interpolation='nearest')\n\n plt.viridis()\n\nplt.suptitle('Gabor (different scales and orientations)')\n# Generate a group of morlet filters and apply it to the brick image\n\nplt.figure(figsize=(16, 8))\nfor j, scale in enumerate(2 ** np.arange(J)):\n for l, theta in enumerate(np.arange(L) / float(L) * np.pi):\n sigma = sigma_xi * scale\n xi = xi_psi / scale\n\n sigma_x = sigma\n sigma_y = sigma / slant\n freq = xi / (np.pi * 2)\n\n morlet = morlet_kernel(freq, theta=theta, sigma_x=sigma_x, sigma_y=sigma_y)\n\n im_filtered = np.abs(ndi.convolve(image, morlet, mode='wrap'))\n\n plt.subplot(J, L, j * L + l + 1)\n plt.imshow(np.real(im_filtered), interpolation='nearest')\n\n plt.viridis()\n\nplt.suptitle('Morlet (different scales and orientations)')\n\nplt.show()\n\nprint('The energy of the filtered image changes with the gabor fiter but not with the Gabor:')\nim_filtered = np.abs(ndi.convolve(image, morlet, mode='wrap'))\nprint('[Morlet] energy:',im_filtered.sum())\nim_filtered100 = np.abs(ndi.convolve(image+100, morlet, mode='wrap'))\nprint('[Morlet] energy (im+100):',im_filtered100.sum())\n\nim_filtered = np.abs(ndi.convolve(image, gabor, mode='wrap'))\nprint('[Gabor] energy:',im_filtered.sum())\nim_filtered100 = np.abs(ndi.convolve(image+100, gabor, mode='wrap'))\nprint('[Gabor] energy (im+100):',im_filtered100.sum())\n"
] |
[
[
"numpy.dot",
"numpy.asarray",
"numpy.squeeze",
"numpy.concatenate",
"numpy.seterr",
"numpy.arctan2",
"numpy.any",
"numpy.cross",
"numpy.exp",
"numpy.where",
"numpy.hypot",
"numpy.ones_like",
"numpy.reshape",
"numpy.empty_like",
"numpy.finfo",
"numpy.sin",
"numpy.asanyarray",
"scipy.linalg.inv",
"numpy.log",
"numpy.nonzero",
"numpy.power",
"numpy.ascontiguousarray",
"numpy.isnan",
"numpy.floor",
"numpy.array",
"numpy.cos",
"numpy.dstack"
],
[
"matplotlib.pylab.suptitle",
"matplotlib.pylab.show",
"numpy.arange",
"scipy.ndimage.convolve",
"matplotlib.pylab.title",
"matplotlib.pylab.viridis",
"matplotlib.pylab.subplot",
"numpy.real",
"matplotlib.pylab.figure",
"matplotlib.pylab.imshow"
]
] |
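The first code cell in the row above (the scikit-image colour-conversion module) reduces every conversion to a 3x3 matrix applied per pixel via `np.dot(arr, matrix.T)`, wrapped by the sRGB companding curve. Below is a minimal numpy/scipy sketch of that round trip for a single pixel, reusing the matrix values quoted in the cell; the helper names (`srgb_to_linear`, `linear_to_srgb`) are illustrative and not part of scikit-image.

```python
import numpy as np
from scipy import linalg

# 3x3 linear-RGB -> XYZ matrix from the sRGB specification (same values as in the cell above).
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
                         [0.212671, 0.715160, 0.072169],
                         [0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)

def srgb_to_linear(c):
    # Inverse sRGB companding, mirroring the rgb2xyz code above.
    c = np.asarray(c, dtype=float)
    return np.where(c > 0.04045, ((c + 0.055) / 1.055) ** 2.4, c / 12.92)

def linear_to_srgb(c):
    # Forward sRGB companding, mirroring the xyz2rgb code above.
    c = np.asarray(c, dtype=float)
    return np.clip(np.where(c > 0.0031308, 1.055 * c ** (1 / 2.4) - 0.055, 12.92 * c), 0, 1)

pixel = np.array([0.2, 0.5, 0.8])            # an arbitrary sRGB pixel in [0, 1]
xyz = xyz_from_rgb @ srgb_to_linear(pixel)   # what rgb2xyz does for one pixel
back = linear_to_srgb(rgb_from_xyz @ xyz)    # what xyz2rgb does for one pixel
assert np.allclose(back, pixel, atol=1e-6)   # the round trip recovers the input
```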
maxhutch/packtets
|
[
"eba7d3d354da9bef50bfdbc48e6934c4e17f165c"
] |
[
"packtets/utils/vis.py"
] |
[
"try:\n import mpl_toolkits.mplot3d as a3\n import matplotlib.pyplot as plt\nexcept ImportError:\n pass\n\ndef plot_packing(packing, box=None, use_symmetry=False):\n \"\"\"Plot packing within box\"\"\"\n ax = a3.Axes3D(plt.figure(10))\n\n if box is not None:\n bounding = [box.vx, box.vy, box.vz]\n for i,j in [(0,1), (0,2), (1,2)]:\n verts = [(0,0,0)]\n verts.append(bounding[i])\n verts.append(bounding[i]+bounding[j])\n verts.append(bounding[j])\n face = a3.art3d.Poly3DCollection([verts], alpha=0.1)\n face.set_facecolor('red')\n face.set_edgecolor('k')\n ax.add_collection3d(face)\n \n ax.set_xlim(0,max([v[0] for v in bounding]))\n ax.set_ylim(0,max([v[1] for v in bounding]))\n ax.set_zlim(0,max([v[2] for v in bounding]));\n\n\n for tet in packing:\n if box is not None and use_symmetry:\n syms = tet.get_symmetry(box.vx, box.vy, box.vz)\n else:\n syms = [tet]\n\n for s in syms:\n for x,y,z in [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]:\n verts = [tuple(s.verts[x]), tuple(s.verts[y]), tuple(s.verts[z])]\n tri = a3.art3d.Poly3DCollection([verts], alpha=0.2)\n tri.set_edgecolor('k')\n ax.add_collection3d(tri)\n\n return\n"
] |
[
[
"matplotlib.pyplot.figure"
]
] |
hugorichard/fmralign
|
[
"b8990cc22204591399b731460375b99254b38527",
"b8990cc22204591399b731460375b99254b38527"
] |
[
"fmralign/metrics.py",
"fmralign/fetch_example_data.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import r2_score\n\n\ndef score_voxelwise(ground_truth, prediction, masker, loss,\n multioutput='raw_values'):\n \"\"\"\n Calculates loss function for predicted, ground truth\n arrays. Supported scores are R2, correlation, and normalized\n reconstruction error (Bazeille et al., 2019)\n\n Parameters\n ----------\n ground_truth: 3D or 4D Niimg\n Reference image\n prediction : 3D or 4D Niimg\n Same shape as `ground_truth`\n masker: instance of NiftiMasker or MultiNiftiMasker\n Masker to be used on ground_truth and prediction. For more information see:\n http://nilearn.github.io/manipulating_images/masker_objects.html\n loss : str in ['R2', 'corr', 'n_reconstruction_err']\n The loss function used in scoring. Default is normalized\n reconstruction error.\n 'R2' :\n The R2 distance between source and target arrays.\n Best possible score is 1.0 and it can be negative (because the\n model can be arbitrarily worse).\n 'corr' :\n The correlation between source and target arrays.\n 'n_reconstruction_err' :\n The normalized reconstruction error. A perfect prediction\n yields a value of 1.0\n multioutput: str in ['raw_values', 'uniform_average']\n Defines method for aggregating multiple output scores. Default method\n is 'raw_values' i.e. no aggregation.\n 'raw_values' :\n Returns a full set of scores in case of multioutput input.\n 'uniform_average' :\n Scores of all outputs are averaged with uniform weight.\n\n Returns\n -------\n score : float or ndarray of floats\n The score or ndarray of scores if ‘multioutput’ is ‘raw_values’.\n The worst possible score is arbitrarily set to -1 for all metrics.\n \"\"\"\n X_gt = masker.transform(ground_truth)\n X_pred = masker.transform(prediction)\n\n if loss is \"R2\":\n score = r2_score(X_gt, X_pred, multioutput=multioutput)\n elif loss is \"n_reconstruction_err\":\n score = normalized_reconstruction_error(\n X_gt, X_pred, multioutput=multioutput)\n elif loss is \"corr\":\n score = np.array([pearsonr(X_gt[:, vox], X_pred[:, vox])[0] # pearsonr returns both rho and p\n for vox in range(X_pred.shape[1])])\n if multioutput == \"uniform_average\":\n score = np.mean(score)\n else:\n raise NameError(\n \"Unknown loss. Recognized values are 'R2', 'corr', or 'reconstruction_err'\")\n # if the calculated score is less than -1, return -1\n return np.maximum(score, -1)\n\n\ndef normalized_reconstruction_error(y_true, y_pred, sample_weights=None,\n multioutput='raw_values'):\n \"\"\"\n Calculates the normalized reconstruction error\n as defined by Bazeille and colleagues (2019).\n\n A perfect prediction yields a value of 1.\n\n Parameters\n ----------\n y_true : arr\n The ground truth array.\n y_pred : arr\n The predicted array.\n sample_weights : arr\n Weights to assign to each sample.\n multioutput: str in ['raw_values', 'uniform_average']\n Defines method for aggregating multiple output scores. Default method\n is 'raw_values' i.e. no aggregation.\n 'raw_values' :\n Returns a full set of scores in case of multioutput input.\n 'uniform_average' :\n Scores of all outputs are averaged with uniform weight.\n\n Returns\n -------\n score : float or ndarray of floats\n The score or ndarray of scores if `multioutput` is `raw_values`.\n\n References\n ----------\n `Bazeille T., Richard H., Janati H., and Thirion B. (2019) Local\n Optimal Transport for Functional Brain Template Estimation.\n In: Chung A., Gee J., Yushkevich P., and Bao S. 
(eds) Information\n Processing in Medical Imaging. Lecture Notes in Computer Science,\n vol 11492. Springer, Cham.\n DOI: 10.1007/978-3-030-20351-1_18.`\n \"\"\"\n if y_true.ndim == 1:\n y_true = y_true.reshape((-1, 1))\n\n if y_pred.ndim == 1:\n y_pred = y_pred.reshape((-1, 1))\n\n numerator = ((y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)\n denominator = ((y_true) ** 2).sum(axis=0, dtype=np.float64)\n\n # Include only non-zero values\n nonzero_denominator = (denominator != 0)\n nonzero_numerator = (numerator != 0)\n valid_score = (nonzero_denominator & nonzero_numerator)\n\n # Calculate reconstruction error\n output_scores = np.ones([y_true.shape[-1]])\n output_scores[valid_score] = 1 - (numerator[valid_score] /\n denominator[valid_score])\n if multioutput == 'raw_values':\n # return scores individually\n return output_scores\n\n elif multioutput == 'uniform_average':\n # passing None as weights yields uniform average\n return np.average(output_scores, weights=None)\n\n\ndef reconstruction_ratio(aligned_error, identity_error):\n \"\"\"\n Calculates the reconstruction error\n as defined by Bazeille and\n colleagues (2019).\n\n A value greater than 0 indicates that\n voxels are predicted better by aligned data\n than by raw data.\n\n Parameters\n ----------\n aligned_error : float or ndarray of floats\n The reconstruction error from a given\n functional alignment method\n identity error : float or ndarray of floats\n The reconstruction error from predicting\n the target subject as the source subject\n\n References\n ----------\n `Bazeille T., Richard H., Janati H., and Thirion B. (2019) Local\n Optimal Transport for Functional Brain Template Estimation.\n In: Chung A., Gee J., Yushkevich P., and Bao S. (eds) Information\n Processing in Medical Imaging. Lecture Notes in Computer Science,\n vol 11492. Springer, Cham.\n DOI: 10.1007/978-3-030-20351-1_18.`\n \"\"\"\n num = 1 - aligned_error\n den = 1 - identity_error\n try:\n return 1 - (num / den)\n except ZeroDivisionError:\n return 0.0\n",
"# -*- coding: utf-8 -*-\nimport os\nfrom nilearn.datasets.utils import _fetch_files, _get_dataset_dir\nimport pandas as pd\n\n\ndef fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):\n \"\"\"Fetch all IBC contrast maps for each of subjects.\n After downloading all relevant images that are not already cached,\n it returns a dataframe with all needed links.\n\n Parameters\n ----------\n subjects : list of str.\n Subjects data to download. Available strings are ['sub-01', 'sub-02',\n 'sub-04' ... 'sub-09', 'sub-11' ... sub-15]\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n verbose: int, optional\n verbosity level (0 means no message).\n\n Returns\n -------\n files : list of list of str\n List (for every subject) of list of path (for every conditions),\n in ap then pa acquisition.\n metadata_df : Pandas Dataframe\n Table containing some metadata for each available image in the dataset,\n as well as their pathself.\n Filtered to contain only the 'subjects' parameter metadatas\n mask: str\n Path to the mask to be used on the data\n Notes\n ------\n This function is a caller to nilearn.datasets.utils._fetch_files in order\n to simplify examples reading and understanding for fmralign.\n See Also\n ---------\n nilearn.datasets.fetch_localizer_calculation_task\n nilearn.datasets.fetch_localizer_contrasts\n \"\"\"\n # The URLs can be retrieved from the nilearn account on OSF\n if subjects is \"all\":\n subjects = ['sub-%02d' %\n i for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]\n dataset_name = 'ibc'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n # download or retrieve metadatas, put it in a dataframe,\n # list all condition and specify path to the right directory\n metadata_path = _fetch_files(data_dir, [('ibc_3mm_all_subjects_metadata.csv',\n \"https://osf.io/pcvje/download\",\n {\"uncompress\": True})],\n verbose=verbose)\n metadata_df = pd.read_csv(metadata_path[0])\n conditions = metadata_df.condition.unique()\n metadata_df['path'] = metadata_df['path'].str.replace(\n 'path_to_dir', data_dir)\n # filter the dataframe to return only rows relevant for subjects argument\n metadata_df = metadata_df[metadata_df.subject.isin(subjects)]\n\n # download / retrieve mask niimg and find its path\n mask = _fetch_files(\n data_dir, [('gm_mask_3mm.nii.gz', \"https://osf.io/yvju3/download\",\n {\"uncompress\": True})], verbose=verbose)[0]\n\n # list all url keys for downloading separetely each subject data\n url_keys = {\"sub-01\": \"8z23h\", \"sub-02\": \"e9kbm\", \"sub-04\": \"qn5b6\",\n \"sub-05\": \"u74a3\", \"sub-06\": \"83bje\", \"sub-07\": \"43j69\",\n \"sub-08\": \"ua8qx\", \"sub-09\": \"bxwtv\", \"sub-11\": \"3dfbv\",\n \"sub-12\": \"uat7d\", \"sub-13\": \"p238h\", \"sub-14\": \"prdk4\",\n \"sub-15\": \"sw72z\"}\n\n # for all subjects in argument, download all contrasts images and list\n # their path in the variable files\n opts = {'uncompress': True}\n files = []\n for subject in subjects:\n url = \"https://osf.io/%s/download\" % url_keys[subject]\n filenames = [(os.path.join(subject, \"%s_ap.nii.gz\" % condition),\n url, opts) for condition in conditions]\n filenames.extend([(os.path.join(subject, \"%s_pa.nii.gz\" % condition),\n url, opts) for condition in conditions])\n files.append(_fetch_files(data_dir, filenames, verbose=verbose))\n return files, metadata_df, mask\n"
] |
[
[
"numpy.maximum",
"sklearn.metrics.r2_score",
"scipy.stats.pearsonr",
"numpy.ones",
"numpy.mean",
"numpy.average"
],
[
"pandas.read_csv"
]
] |
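The `normalized_reconstruction_error` in `fmralign/metrics.py` above is, per voxel (column), `1 - sum((y_true - y_pred)**2) / sum(y_true**2)`: an R2-like score whose denominator is not mean-centred, with 1.0 meaning a perfect prediction. A numpy-only sketch of that formula on synthetic data; the array shapes and noise level here are made up for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)
y_true = rng.normal(size=(50, 4))                      # 50 samples x 4 "voxels"
y_pred = y_true + 0.1 * rng.normal(size=y_true.shape)  # a slightly noisy prediction

num = ((y_true - y_pred) ** 2).sum(axis=0)   # residual energy per voxel
den = (y_true ** 2).sum(axis=0)              # signal energy per voxel (not mean-centred)
score = 1.0 - num / den                      # 1.0 would be a perfect prediction

print(score)          # 'raw_values': one score per voxel
print(score.mean())   # 'uniform_average': plain mean of the voxelwise scores
```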
KristinaUlicna/NanoPush
|
[
"7d9be1ea5b0c686920b33c2278fb9a7b30c0f7d0"
] |
[
"NanoPush/Function - Alignment (my initial approach).py"
] |
[
"# General directory for HDF5 files:\n\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/...\"\n\n\n# UNIVERSAL FUNCTION FOR THE ENTIRE ANALYSIS OF ALL (+ve or -ve) HDF5 FILES:\n\ndef AlignQueToRef(HDF5_file):\n \"\"\"Insert file name into \"\" with a full directory (from /Users/...)\n 1.) Extracts the FastQ sequence (string) from an HDF5 file type.\n 2.) Creates the alignment of your query sequence to the reference.\n 3.) Checks for strand orientation. Leaves positive strand untouched.\n 4.) Converts a negative strand into positive by creating complement.\n 5.) Re-aligns the optimised strand to the reference & prints both sequences.\n 6.) Maps the enumeration of respective mismatches, insertions and deletions\n from the Cigar sequence + prints the analysis:\n - Breaks down of aligned Ref & Que sequences accoring to the Cigar Parameters.\n - Produces a continuous Ref & Que sequences (with respective in/del gaps).\n - Summarizes the total number of matches/mismatches, insertions and deletions.\n - Prints out each individual base in Ref and Que on contig position (no gaps in Ref seq).\n - Checks & prints out mismatches in the aligned sequences.\n RETURNS hit, reference, query_seq, reference_with_gaps, query_with_gaps.\"\"\"\n print (\"HDF5 file name:\", \"\\t\", HDF5_file)\n\n #Extraction of FastQ sequence (string) from an HDF5 file type:\n import h5py\n file = h5py.File(HDF5_file, \"r\")\n import numpy as np\n readSeq = np.array(file[\"Analyses\"][\"Basecall_1D_000\"][\"BaseCalled_template\"][\"Fastq\"])\n readSeq = str(readSeq)\n readSeq = readSeq.split(\"\\\\n\")\n readSeq = readSeq[1]\n print (\"ReadSeq:\", \"\\t\", len(readSeq), \"\\t\", readSeq, \"\\t\", type(readSeq))\n\n #Alignment of the readSeq sequence to the reference genome (honeybee - Apis mellifera):\n import mappy as mp\n alignment = mp.Aligner(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/GCF_000002195.4_Amel_4.5_genomic.fa.gz\",\n preset=\"map-ont\")\n for hit in alignment.map(readSeq):\n print (\"Hit parameters:\", \"\\t\", hit)\n\n #Convertion of the strand (readSeq) into positive orientation:\n if hit.strand == -1:\n query = readSeq[::-1]\n query.upper()\n query = str(query) \\\n .replace('T', '%temp1%').replace('A', 'T').replace('%temp1%', 'A') \\\n .replace('C', '%temp2%').replace('G', 'C').replace('%temp2%', 'G')\n print (\"Strand in negative orientation. Converted seq:\", query)\n else:\n query = readSeq\n query.upper()\n print (\"Strand in positive orientation. 
Unchanged seq:\", query)\n print ()\n\n #Change hit.q_st and hit.q_en values if strand was negative:\n if hit.strand == -1:\n reference = alignment.seq(hit.ctg, hit.r_st, hit.r_en)\n query_seq = query[len(query)-hit.q_en:len(query)-hit.q_st]\n else:\n reference = alignment.seq(hit.ctg, hit.r_st, hit.r_en)\n query_seq = query[hit.q_st:hit.q_en]\n print (\"Reference:\", \"\\t\", len(reference), \"\\t\", reference)\n print (\"Query seq:\", \"\\t\", len(query_seq), \"\\t\", query_seq)\n print ()\n\n #Breaking down of aligned Ref & Que sequences accoring to the Cigar Parameters:\n print (\"Alignment Analysis - Chunks with matches/mismatches, insertions and deletions:\")\n position_ref = 0\n position_que = 0\n for parameter in hit.cigar:\n if parameter[1] == 0:\n print(\"Ref:\", position_ref + 1, \":\", position_ref + parameter[0], \"\\t\",\n reference[position_ref:position_ref + parameter[0]])\n print(\"Que:\", position_que + 1, \":\", position_que + parameter[0], \"\\t\",\n query_seq[position_que:position_que + parameter[0]])\n position_ref += parameter[0]\n position_que += parameter[0]\n elif parameter[1] == 2:\n print(\"Ref:\", position_ref + 1, \":\", position_ref + parameter[0], \"\\t\",\n reference[position_ref:position_ref + parameter[0]])\n print(\"Que:\", position_que + 1, \":\", position_que + parameter[0], \"\\t\",\n len(query_seq[position_que:position_que + parameter[0]]) * \"-\")\n position_ref += parameter[0]\n elif parameter[1] == 1:\n print(\"Ref:\", position_ref + 1, \":\", position_ref + parameter[0], \"\\t\",\n len(reference[position_ref:position_ref + parameter[0]]) * \"-\")\n print(\"Que:\", position_que + 1, \":\", position_que + parameter[0], \"\\t\",\n query_seq[position_que:position_que + parameter[0]])\n position_que += parameter[0]\n print ()\n\n #Producing a continuous Ref & Que sequences (with respective in/del gaps):\n print (\"Continuous aligned sequences: Reference and Query with gaps:\")\n reference_with_gaps = \"\"\n position_ref = 0\n print (\"Reference:\", \"\\t\", \"( Length of seq:\", len(reference), \")\")\n for parameter in hit.cigar:\n if parameter[1] == 0:\n reference_with_gaps += reference[position_ref:position_ref + parameter[0]]\n position_ref += parameter[0]\n elif parameter[1] == 2:\n reference_with_gaps += reference[position_ref:position_ref + parameter[0]]\n position_ref += parameter[0]\n elif parameter[1] == 1:\n reference_with_gaps += len(reference[position_ref:position_ref + parameter[0]]) * \"-\"\n print (reference_with_gaps, \"\\t\", len(reference_with_gaps), \"\\t\", reference_with_gaps.count(\"-\"), \"insertions\")\n query_with_gaps = \"\"\n position_que = 0\n print (\"Query Seq:\", \"\\t\", \"( Length of seq:\", len(query_seq), \")\")\n for parameter in hit.cigar:\n if parameter[1] == 0:\n query_with_gaps += query_seq[position_que:position_que + parameter[0]]\n position_que += parameter[0]\n elif parameter[1] == 2:\n query_with_gaps += len(query_seq[position_que:position_que + parameter[0]]) * \"-\"\n elif parameter[1] == 1:\n query_with_gaps += query_seq[position_que:position_que + parameter[0]]\n position_que += parameter[0]\n print (query_with_gaps, \"\\t\", len(query_with_gaps), \"\\t\", query_with_gaps.count(\"-\"), \"deletions\")\n print ()\n\n # Summarizing the total number of matches/mismatches, insertions and deletions:\n mis_matches = 0\n insertions = 0\n deletions = 0\n for k in hit.cigar:\n if k[1] == 0:\n mis_matches += k[0]\n elif k[1] == 1:\n insertions += k[0]\n elif k[1] == 2:\n deletions += k[0]\n print (\"Number of 
Matches/Mismatches:\", mis_matches, \"\\t\", \"Number of Insertions:\", insertions, \"\\t\", \"Number of deletions:\", deletions)\n print ()\n\n # Prints gaps in the reference - i.e. insertions are included, this is not correct!\n print(\"R e f e r e n c e w i t h g a p s:\")\n position_on_contig = hit.r_st\n for base_ref, base_que in zip(reference_with_gaps, query_with_gaps):\n print(\"Position on contig:\", position_on_contig, \"\\t\", \"Ref Base:\", base_ref, \"\\t\", \"Que Base:\", base_que)\n position_on_contig += 1\n\n #Printing out each individual base in ref and seq on contig position:\n #(prints no gaps in the reference; modified for insertions)\n print(\"Dissection of each base with position on the contig:\")\n position_on_contig = hit.r_st\n for base_ref, base_que in zip(reference_with_gaps, query_with_gaps):\n if base_ref != \"-\":\n print(\"Position on contig:\", position_on_contig, \"\\t\", \"Ref Base:\", base_ref, \"\\t\", \"Que Base:\", base_que)\n position_on_contig += 1\n print ()\n\n # Match/mismatches check: in the aligned sequences:\n print(\"Summary of mismatches:\")\n position_contig_mismatch = hit.r_st\n mismatch_counter = 0\n for base_ref, base_que in zip(reference_with_gaps, query_with_gaps):\n if base_ref != \"-\" and base_que != \"-\":\n if base_ref != base_que:\n print(\"Mismatch at contig position:\", position_contig_mismatch, \"\\t\", base_ref, \"->\", base_que)\n mismatch_counter += 1\n elif base_ref == \"-\":\n position_contig_mismatch -= 1\n position_contig_mismatch += 1\n print (\"\\t\", \"Total mismatches:\", mismatch_counter)\n print ()\n return hit, reference, query_seq, reference_with_gaps, query_with_gaps\n\n\n # P O S I T I V E S T R A N D S :\n#AlignQueToRef(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/CLN_SMD_048987_20180430_FAH86090_MN27963_sequencing_run_BeeRapidNormal_81579_read_8_ch_239_strand.fast5\")\n#AlignQueToRef(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/CLN_SMD_048987_20180430_FAH86090_MN27963_sequencing_run_BeeRapidNormal_81579_read_6218_ch_429_strand.fast5\")\n\n # N E G A T I V E S T R A N D S :\n#AlignQueToRef(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/CLN_SMD_048987_20180430_FAH86090_MN27963_sequencing_run_BeeRapidNormal_81579_read_16_ch_305_strand.fast5\")\n#AlignQueToRef(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/CLN_SMD_048987_20180430_FAH86090_MN27963_sequencing_run_BeeRapidNormal_81579_read_15_ch_151_strand.fast5\")\n\nhelp(AlignQueToRef)\n\n\n\n# Calculation of the 'standardisation factor':\n\nimport h5py\nfile = h5py.File(\"/Users/kristinaulicna/Documents/Rotation_1_Rob_Lowe/Nanopore sequences(fast5)/CLN_SMD_048987_20180430_FAH86090_MN27963_sequencing_run_BeeRapidNormal_81579_read_6_ch_479_strand.fast5\", \"r\")\n\nimport numpy as np\nreadSeq = np.array(file[\"Analyses\"][\"Basecall_1D_000\"][\"BaseCalled_template\"][\"Fastq\"])\nreadSeq = str(readSeq)\nreadSeq = readSeq.split(\"\\\\n\")\nreadSeq = readSeq[1]\nprint (type(readSeq), \"\\t\", len(readSeq), \"\\t\", readSeq)\n\nprint(\"ReadSeq 1-120\", \"\\t\", readSeq[0:120])\n\nsignal = file[\"Raw\"][\"Reads\"][\"Read_6\"][\"Signal\"]\nprint (signal)\nprint (signal[0:600]) #because I need 5 data points for 1 base in range [0:120]\n\nevents = np.array(file[\"Analyses\"][\"Basecall_1D_000\"][\"BaseCalled_template\"][\"Events\"])\nprint(\"Events in total:\", len(events))\n#print (events[0:1000]) #range picked randomly, you don't know 
how many repetitive values there are per single base...\n\n\nfactor_list = list()\nsignal_order = 0\nevents_order = 0\nwhile signal_order < len(signal) and events_order < len(events):\n factor = (np.mean(signal[signal_order:signal_order+5])) / events[events_order][0]\n factor_list.append(factor)\n signal_order += 5\n events_order += 1\nmean_factor_value = np.mean(factor_list[0:len(factor_list)])\nstdev_factor_value = np.std(factor_list[0:len(factor_list)])\nprint ()\nprint (\"Mean Factor Value:\", mean_factor_value, \"\\t\", \"St.Dev.:\", stdev_factor_value)"
] |
[
[
"numpy.array",
"numpy.mean"
]
] |
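Most of the alignment function above is a walk over `hit.cigar`, where, following the convention used in that code, operation 0 consumes both reference and query (match/mismatch), 1 consumes only the query (insertion) and 2 consumes only the reference (deletion). Here is a self-contained sketch of that walk on a made-up reference/query/CIGAR triple, so no HDF5 file or `mappy` aligner is needed.

```python
# Hypothetical 3-operation alignment: 4 matches, 1 insertion in the query, 3 matches.
reference = "ACGTACG"
query     = "ACGTTACG"
cigar     = [(4, 0), (1, 1), (3, 0)]   # (length, op): 0=match/mismatch, 1=insertion, 2=deletion

ref_gapped, que_gapped = "", ""
r, q = 0, 0
for length, op in cigar:
    if op == 0:                          # consumes reference and query
        ref_gapped += reference[r:r + length]
        que_gapped += query[q:q + length]
        r += length
        q += length
    elif op == 1:                        # insertion: gap in the reference
        ref_gapped += "-" * length
        que_gapped += query[q:q + length]
        q += length
    elif op == 2:                        # deletion: gap in the query
        ref_gapped += reference[r:r + length]
        que_gapped += "-" * length
        r += length

print(ref_gapped)   # ACGT-ACG
print(que_gapped)   # ACGTTACG
```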
chidperi/deep_learning_python
|
[
"d76b709754b5aa2b333fe3c41f499349af9b596b"
] |
[
"NNActivations.py"
] |
[
"# File name: NNActivations\n# Copyright 2017 Chidambaram Periakaruppan\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nimport numpy as np\n\n\ndef sigmoid(x):\n '''\n\n Args:\n x(ndarray)\n\n Returns:\n ndarray: Sigmoid activation and derivative of x.\n\n '''\n\n y = 1. / (1 + np.exp(-x))\n deriv = np.multiply(y, 1. - y)\n\n return y, deriv\n\n\ndef relu(x):\n '''\n\n Args:\n x(ndarray)\n\n Returns:\n ndarray: Relu activation and derivative of x.\n\n '''\n y = np.maximum(0, x)\n deriv = (x > 0) * 1\n\n return y, deriv\n\n\ndef leaky_relu(x, leak=0.1):\n '''\n\n Args:\n x(ndarray)\n leak(int)\n\n Returns:\n ndarray: Leaky relu activation and derivative of x.\n\n '''\n y = np.maximum(leak * x, x)\n deriv = (x < 0) * leak\n deriv = deriv + (x > 0) * 1\n\n return y, deriv\n\n\nactivation_functions = {'relu': relu, 'leaky_relu': leaky_relu, 'sigmoid': sigmoid}\n"
] |
[
[
"numpy.exp",
"numpy.maximum",
"numpy.multiply"
]
] |
marvahm12/PyEng
|
[
"4a4bf8f9d3bcd242243e91d86a598e19c7841601"
] |
[
"Image_Video/image_video_newcode/blur_new.py"
] |
[
"import cv2\nimport sys\nimport matplotlib.pyplot as plt\n\n\ndef blur_display(infile, nogui=False):\n # The first argument is the image\n image = cv2.imread(infile)\n\n #conver to grayscale\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n #blur it\n blurred_image = cv2.GaussianBlur(image, (7,7), 0)\n\n if nogui:\n cv2.imwrite('test_blurred.png', blurred_image)\n else:\n\n # Show all 3 images\n cv2.imwrite(\"Original_Image.png\", image)\n cv2.imwrite(\"Gray_Image.png\", gray_image)\n cv2.imwrite(\"Blurred_Image.png\", blurred_image)\n\n cv2.waitKey(0)\n\nif __name__ == \"__main__\":\n blur_display(sys.argv[1])\n plt.savefig('output/Original_Image.png')\n plt.savefig('output/Gray_Image.png')\n plt.savefig('output/Blurred_Image.png')"
] |
[
[
"matplotlib.pyplot.savefig"
]
] |
IBM/Simultaneous-diagonalization
|
[
"385545401395a2e07f109441db4751a5dcf8f0a4"
] |
[
"verify_diag_y.py"
] |
[
"# Copyright 2022 IBM Inc. All rights reserved\n# SPDX-License-Identifier: Apache2.0\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This file is part of the code to reproduce the results in the paper:\n# E. van den Berg and Kristan Temme, \"Circuit optimization of Hamiltonian\n# simulation by simultaneous diagonalization of Pauli clusters,\" Quantum 4,\n# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322\n\nfrom qiskit import *\nimport numpy as np\n\ny = np.asarray([[0,-1j],[1j,0]])\n\nangle = 0.3\n\n(Ev,Eb) = np.linalg.eig(y)\nexpy1 = np.dot(Eb,np.dot(np.diag(np.exp(1j*angle*Ev)), Eb.T.conj()))\n\ncircuit = QuantumCircuit(2)\ncircuit.s(0)\ncircuit.h(0)\ncircuit.x(0)\ncircuit.cx(0,1)\ncircuit.rz(-angle,1)\ncircuit.x(1)\ncircuit.rz(angle,1)\ncircuit.x(1)\ncircuit.cx(0,1)\ncircuit.x(0)\ncircuit.h(0)\ncircuit.sdg(0)\n\nbackend = Aer.get_backend('unitary_simulator')\nU = execute(circuit, backend).result().get_unitary()\nexpy2 = U[:2,:2]\nprint(\"Error = %s\" % np.linalg.norm(expy1 - expy2,'fro'))\n"
] |
[
[
"numpy.asarray",
"numpy.linalg.eig",
"numpy.exp",
"numpy.linalg.norm"
]
] |
leonardoedgar/raveutils
|
[
"b4f355249926ada87d863d8f25f2f6ae5d0c0a53"
] |
[
"tests/test_planning.py"
] |
[
"#! /usr/bin/env python\nimport unittest\nimport numpy as np\nimport openravepy as orpy\n# Tested package\nimport raveutils as ru\n\n\nclass Test_planning(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n np.set_printoptions(precision=6, suppress=True)\n scene = 'robots/puma.robot.xml'\n env = orpy.Environment()\n if not env.Load(scene):\n raise Exception('Could not load scene: {0}'.format(scene))\n robot = env.GetRobot('PumaGripper')\n manip = robot.SetActiveManipulator('arm')\n robot.SetActiveDOFs(manip.GetArmIndices())\n # Store the environment and robot in the class\n cls.env = env\n cls.robot = robot\n print('') # dummy line\n\n @classmethod\n def tearDownClass(cls):\n cls.env.Reset()\n cls.env.Destroy()\n\n def test_plan_cartesian_twist(self):\n np.random.seed(123)\n env = self.env\n robot = self.robot\n distances = [0.01, 0.02, 0.1, 0.5, 1.]\n for dist in distances:\n while True:\n q = ru.kinematics.random_joint_values(robot)\n with env:\n robot.SetActiveDOFValues(q)\n if not env.CheckCollision(robot):\n break\n twist = np.random.rand(6)*dist\n traj = ru.planning.plan_cartesian_twist(robot, twist, num_waypoints=10)\n\n def test_plan_constant_velocity_twist(self):\n np.random.seed(123)\n env = self.env\n robot = self.robot\n velocity = 0.002\n distances = [0.01, 0.02, 0.1, 0.5, 1.]\n for dist in distances:\n while True:\n q = ru.kinematics.random_joint_values(robot)\n with env:\n robot.SetActiveDOFValues(q)\n if not env.CheckCollision(robot):\n break\n twist = np.random.rand(6)*dist\n traj = ru.planning.plan_constant_velocity_twist(robot, twist, velocity)\n # Superpass velocity limits\n np.random.seed(123)\n velocity = 0.02\n twist = np.random.rand(6)*0.02\n traj = ru.planning.plan_constant_velocity_twist(robot, twist, velocity)\n\n def test_plan_to_joint_configuration(self):\n np.random.seed(123)\n robot = self.robot\n qgoal = ru.kinematics.random_joint_values(robot)\n # Test all the available planners\n traj1 = ru.planning.plan_to_joint_configuration(robot, qgoal, pname='BiRRT')\n self.assertNotEqual(traj1, None)\n traj = ru.planning.plan_to_joint_configuration(robot, qgoal,\n pname='BasicRRT')\n self.assertNotEqual(traj, None)\n # Test swaping option\n traj2 = ru.planning.plan_to_joint_configuration(robot, qgoal, pname='BiRRT',\n try_swap=True)\n self.assertNotEqual(traj2, None)\n # Test without post-processing\n traj3 = ru.planning.plan_to_joint_configuration(robot, qgoal, pname='BiRRT',\n max_ppiters=-1)\n self.assertNotEqual(traj3, None)\n\n def test_plan_to_pose(self):\n np.random.seed(123)\n env = self.env\n robot = self.robot\n manip = robot.GetActiveManipulator()\n # Start at a random configuration\n qstart = ru.kinematics.random_joint_values(robot)\n robot.SetActiveDOFValues(qstart)\n Tstart = manip.GetEndEffectorTransform()\n # In this example, we want to move at constant velocity to a pose that is 5 cm away from the start pose\n # The rotation is 10 degress in every component\n twist = np.zeros(6)\n twist[:3] = [0.05, 0.05, 0.05]\n twist[3:] = np.deg2rad([10, 10, 10])\n velocity = 0.1 # m/s^2\n traj = ru.planning.plan_constant_velocity_twist(robot, twist, velocity)\n # Use ros_trajectory_from_openrave to get the lastest joint config of the trajectory\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(), traj)\n qgoal = ros_traj.points[-1].positions\n robot.SetActiveDOFValues(qgoal)\n Tgoal = manip.GetEndEffectorTransform()\n # Check the goal config makes sense (don't expect to much precision, here we are 1.5mm off the goal)\n pos_diff = 
Tgoal[:3,3] - Tstart[:3,3]\n self.assertTrue(np.linalg.norm(pos_diff - twist[:3]) < 2e-3)\n\n def test_retime_trajectory(self):\n np.random.seed(123)\n robot = self.robot\n qgoal = ru.kinematics.random_joint_values(robot)\n traj = ru.planning.plan_to_joint_configuration(robot, qgoal, pname='BiRRT')\n # Test all the available retiming methods\n status = ru.planning.retime_trajectory(robot, traj,\n 'LinearTrajectoryRetimer')\n self.assertEqual(status, orpy.PlannerStatus.HasSolution)\n status = ru.planning.retime_trajectory(robot, traj,\n 'ParabolicTrajectoryRetimer')\n self.assertEqual(status, orpy.PlannerStatus.HasSolution)\n status = ru.planning.retime_trajectory(robot, traj,\n 'CubicTrajectoryRetimer')\n self.assertEqual(status, orpy.PlannerStatus.HasSolution)\n\n def test_ros_trajectory_from_openrave(self):\n np.random.seed(123)\n robot = self.robot\n qgoal = ru.kinematics.random_joint_values(robot)\n traj = ru.planning.plan_to_joint_configuration(robot, qgoal, pname='BiRRT')\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(), traj)\n # Check trajs durations\n ros_traj_duration = ros_traj.points[-1].time_from_start.to_sec()\n np.testing.assert_almost_equal(ros_traj_duration, traj.GetDuration())\n # Check num of waypoints\n self.assertEqual(len(ros_traj.points), traj.GetNumWaypoints())\n # Send trajectory with repeated waypoints\n waypoints = []\n for i in range(5):\n q = ru.kinematics.random_joint_values(robot)\n waypoints.append(q)\n waypoints.append(q)\n traj = ru.planning.trajectory_from_waypoints(robot, waypoints)\n status = ru.planning.retime_trajectory(robot, traj,\n 'ParabolicTrajectoryRetimer')\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(), traj)\n # Check trajs durations\n ros_traj_duration = ros_traj.points[-1].time_from_start.to_sec()\n np.testing.assert_almost_equal(ros_traj_duration, traj.GetDuration())\n # Check num of waypoints\n self.assertTrue(len(ros_traj.points) < traj.GetNumWaypoints())\n # Test corrupted trajectories: missing deltatime\n env = self.env\n robot_name = robot.GetName()\n traj_corrupted = orpy.RaveCreateTrajectory(env, '')\n spec = traj.GetConfigurationSpecification()\n values_group = spec.GetGroupFromName('joint_values {0}'.format(robot_name))\n velocities_group = spec.GetGroupFromName(\n 'joint_velocities {0}'.format(robot_name))\n deltatime_group = spec.GetGroupFromName('deltatime')\n spec.RemoveGroups('deltatime')\n traj_corrupted.Init(spec)\n for i in xrange(traj.GetNumWaypoints()):\n waypoint = traj.GetWaypoint(i).tolist()\n waypoint.pop(deltatime_group.offset)\n traj_corrupted.Insert(i, waypoint)\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(),\n traj_corrupted)\n self.assertEqual(ros_traj, None)\n # Test corrupted trajectories: missing joint_velocities\n manip = robot.GetActiveManipulator()\n spec = manip.GetArmConfigurationSpecification()\n traj_corrupted = orpy.RaveCreateTrajectory(env, '')\n traj_corrupted.Init(spec)\n for i in xrange(traj.GetNumWaypoints()):\n waypoint = traj.GetWaypoint(i).tolist()\n values_end = values_group.offset + values_group.dof\n traj_corrupted.Insert(i, waypoint[values_group.offset:values_end])\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(),\n traj_corrupted)\n self.assertEqual(ros_traj, None)\n # Test corrupted trajectories: missing joint_values\n traj_corrupted = orpy.RaveCreateTrajectory(env, '')\n spec = orpy.ConfigurationSpecification()\n indices = ' '.join(map(str,manip.GetArmIndices()))\n spec.AddGroup('joint_velocities {0} 
{1}'.format(robot_name, indices),\n robot.GetActiveDOF(), 'linear')\n traj_corrupted.Init(spec)\n for i in xrange(traj.GetNumWaypoints()):\n waypoint = traj.GetWaypoint(i).tolist()\n values_end = values_group.offset + values_group.dof\n traj_corrupted.Insert(i, waypoint[values_group.offset:values_end])\n ros_traj = ru.planning.ros_trajectory_from_openrave(robot.GetName(),\n traj_corrupted)\n self.assertEqual(ros_traj, None)\n\n def test_trajectory_from_waypoints(self):\n np.random.seed(123)\n robot = self.robot\n waypoints = []\n for i in range(5):\n waypoints.append(ru.kinematics.random_joint_values(robot))\n traj = ru.planning.trajectory_from_waypoints(robot, waypoints)\n self.assertEqual(traj.GetNumWaypoints(), len(waypoints))\n"
] |
[
[
"numpy.random.seed",
"numpy.set_printoptions",
"numpy.linalg.norm",
"numpy.deg2rad",
"numpy.random.rand",
"numpy.zeros"
]
] |
srikar1001/mymmdetection3d
|
[
"01592d4caeeb6b29df965533437d19b62b23da61"
] |
[
"mmdet3d/datasets/pipelines/loading.py"
] |
[
"import mmcv\nimport numpy as np\n\nfrom mmdet3d.core.points import BasePoints, get_points_type\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import LoadAnnotations\n\n\[email protected]_module()\nclass LoadMultiViewImageFromFiles(object):\n \"\"\"Load multi channel images from a list of separate channel files.\n\n Expects results['img_filename'] to be a list of filenames.\n\n Args:\n to_float32 (bool): Whether to convert the img to float32.\n Defaults to False.\n color_type (str): Color type of the file. Defaults to 'unchanged'.\n \"\"\"\n\n def __init__(self, to_float32=False, color_type='unchanged'):\n self.to_float32 = to_float32\n self.color_type = color_type\n\n def __call__(self, results):\n \"\"\"Call function to load multi-view image from files.\n\n Args:\n results (dict): Result dict containing multi-view image filenames.\n\n Returns:\n dict: The result dict containing the multi-view image data. \\\n Added keys and values are described below.\n\n - filename (str): Multi-view image filenames.\n - img (np.ndarray): Multi-view image arrays.\n - img_shape (tuple[int]): Shape of multi-view image arrays.\n - ori_shape (tuple[int]): Shape of original image arrays.\n - pad_shape (tuple[int]): Shape of padded image arrays.\n - scale_factor (float): Scale factor.\n - img_norm_cfg (dict): Normalization configuration of images.\n \"\"\"\n filename = results['img_filename']\n img = np.stack(\n [mmcv.imread(name, self.color_type) for name in filename], axis=-1)\n if self.to_float32:\n img = img.astype(np.float32)\n results['filename'] = filename\n results['img'] = img\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n # Set initial values for default meta_keys\n results['pad_shape'] = img.shape\n results['scale_factor'] = 1.0\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results['img_norm_cfg'] = dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False)\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n return \"{} (to_float32={}, color_type='{}')\".format(\n self.__class__.__name__, self.to_float32, self.color_type)\n\n\[email protected]_module()\nclass LoadPointsFromMultiSweeps(object):\n \"\"\"Load points from multiple sweeps.\n\n This is usually used for nuScenes dataset to utilize previous sweeps.\n\n Args:\n sweeps_num (int): Number of sweeps. Defaults to 10.\n load_dim (int): Dimension number of the loaded points. Defaults to 5.\n use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4].\n file_client_args (dict): Config dict of file clients, refer to\n https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py\n for more details. Defaults to dict(backend='disk').\n pad_empty_sweeps (bool): Whether to repeat keyframe when\n sweeps is empty. 
Defaults to False.\n remove_close (bool): Whether to remove close points.\n Defaults to False.\n test_mode (bool): If test_model=True used for testing, it will not\n randomly sample sweeps but select the nearest N frames.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n sweeps_num=10,\n load_dim=5,\n use_dim=[0, 1, 2, 4],\n file_client_args=dict(backend='disk'),\n pad_empty_sweeps=False,\n remove_close=False,\n test_mode=False):\n self.load_dim = load_dim\n self.sweeps_num = sweeps_num\n self.use_dim = use_dim\n self.file_client_args = file_client_args.copy()\n self.file_client = None\n self.pad_empty_sweeps = pad_empty_sweeps\n self.remove_close = remove_close\n self.test_mode = test_mode\n\n def _load_points(self, pts_filename):\n \"\"\"Private function to load point clouds data.\n\n Args:\n pts_filename (str): Filename of point clouds data.\n\n Returns:\n np.ndarray: An array containing point clouds data.\n \"\"\"\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n pts_bytes = self.file_client.get(pts_filename)\n points = np.frombuffer(pts_bytes, dtype=np.float32)\n except ConnectionError:\n mmcv.check_file_exist(pts_filename)\n if pts_filename.endswith('.npy'):\n points = np.load(pts_filename)\n else:\n points = np.fromfile(pts_filename, dtype=np.float32)\n return points\n\n def _remove_close(self, points, radius=1.0):\n \"\"\"Removes point too close within a certain radius from origin.\n\n Args:\n points (np.ndarray): Sweep points.\n radius (float): Radius below which points are removed.\n Defaults to 1.0.\n\n Returns:\n np.ndarray: Points after removing.\n \"\"\"\n if isinstance(points, np.ndarray):\n points_numpy = points\n elif isinstance(points, BasePoints):\n points_numpy = points.tensor.numpy()\n else:\n raise NotImplementedError\n x_filt = np.abs(points_numpy[:, 0]) < radius\n y_filt = np.abs(points_numpy[:, 1]) < radius\n not_close = np.logical_not(np.logical_and(x_filt, y_filt))\n return points[not_close]\n\n def __call__(self, results):\n \"\"\"Call function to load multi-sweep point clouds from files.\n\n Args:\n results (dict): Result dict containing multi-sweep point cloud \\\n filenames.\n\n Returns:\n dict: The result dict containing the multi-sweep points data. 
\\\n Added key and value are described below.\n\n - points (np.ndarray): Multi-sweep point cloud arrays.\n \"\"\"\n points = results['points']\n points.tensor[:, 4] = 0\n sweep_points_list = [points]\n ts = results['timestamp']\n if self.pad_empty_sweeps and len(results['sweeps']) == 0:\n for i in range(self.sweeps_num):\n if self.remove_close:\n sweep_points_list.append(self._remove_close(points))\n else:\n sweep_points_list.append(points)\n else:\n if len(results['sweeps']) <= self.sweeps_num:\n choices = np.arange(len(results['sweeps']))\n elif self.test_mode:\n choices = np.arange(self.sweeps_num)\n else:\n choices = np.random.choice(\n len(results['sweeps']), self.sweeps_num, replace=False)\n for idx in choices:\n sweep = results['sweeps'][idx]\n points_sweep = self._load_points(sweep['data_path'])\n points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim)\n if self.remove_close:\n points_sweep = self._remove_close(points_sweep)\n sweep_ts = sweep['timestamp'] / 1e6\n points_sweep[:, :3] = points_sweep[:, :3] @ sweep[\n 'sensor2lidar_rotation'].T\n points_sweep[:, :3] += sweep['sensor2lidar_translation']\n points_sweep[:, 4] = ts - sweep_ts\n points_sweep = points.new_point(points_sweep)\n sweep_points_list.append(points_sweep)\n\n points = points.cat(sweep_points_list)\n points = points[:, self.use_dim]\n results['points'] = points\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n return f'{self.__class__.__name__}(sweeps_num={self.sweeps_num})'\n\n\[email protected]_module()\nclass PointSegClassMapping(object):\n \"\"\"Map original semantic class to valid category ids.\n\n Map valid classes as 0~len(valid_cat_ids)-1 and\n others as len(valid_cat_ids).\n\n Args:\n valid_cat_ids (tuple[int]): A tuple of valid category.\n \"\"\"\n\n def __init__(self, valid_cat_ids):\n self.valid_cat_ids = valid_cat_ids\n\n def __call__(self, results):\n \"\"\"Call function to map original semantic class to valid category ids.\n\n Args:\n results (dict): Result dict containing point semantic masks.\n\n Returns:\n dict: The result dict containing the mapped category ids. \\\n Updated key and value are described below.\n\n - pts_semantic_mask (np.ndarray): Mapped semantic masks.\n \"\"\"\n assert 'pts_semantic_mask' in results\n pts_semantic_mask = results['pts_semantic_mask']\n neg_cls = len(self.valid_cat_ids)\n\n for i in range(pts_semantic_mask.shape[0]):\n if pts_semantic_mask[i] in self.valid_cat_ids:\n converted_id = self.valid_cat_ids.index(pts_semantic_mask[i])\n pts_semantic_mask[i] = converted_id\n else:\n pts_semantic_mask[i] = neg_cls\n\n results['pts_semantic_mask'] = pts_semantic_mask\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += '(valid_cat_ids={})'.format(self.valid_cat_ids)\n return repr_str\n\n\[email protected]_module()\nclass NormalizePointsColor(object):\n \"\"\"Normalize color of points.\n\n Args:\n color_mean (list[float]): Mean color of the point cloud.\n \"\"\"\n\n def __init__(self, color_mean):\n self.color_mean = color_mean\n\n def __call__(self, results):\n \"\"\"Call function to normalize color of points.\n\n Args:\n results (dict): Result dict containing point clouds data.\n\n Returns:\n dict: The result dict containing the normalized points. 
\\\n Updated key and value are described below.\n\n - points (np.ndarray): Points after color normalization.\n \"\"\"\n points = results['points']\n assert points.shape[1] >= 6,\\\n f'Expect points have channel >=6, got {points.shape[1]}'\n points[:, 3:6] = points[:, 3:6] - np.array(self.color_mean) / 256.0\n results['points'] = points\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += '(color_mean={})'.format(self.color_mean)\n return repr_str\n\n\[email protected]_module()\nclass LoadPointsFromFile(object):\n \"\"\"Load Points From File.\n\n Load sunrgbd and scannet points from file.\n\n Args:\n load_dim (int): The dimension of the loaded points.\n Defaults to 6.\n coord_type (str): The type of coordinates of points cloud.\n Available options includes:\n - 'LIDAR': Points in LiDAR coordinates.\n - 'DEPTH': Points in depth coordinates, usually for indoor dataset.\n - 'CAMERA': Points in camera coordinates.\n use_dim (list[int]): Which dimensions of the points to be used.\n Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4\n or use_dim=[0, 1, 2, 3] to use the intensity dimension.\n shift_height (bool): Whether to use shifted height. Defaults to False.\n file_client_args (dict): Config dict of file clients, refer to\n https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py\n for more details. Defaults to dict(backend='disk').\n \"\"\"\n\n def __init__(self,\n coord_type,\n load_dim=6,\n use_dim=[0, 1, 2],\n shift_height=False,\n file_client_args=dict(backend='disk')):\n self.shift_height = shift_height\n if isinstance(use_dim, int):\n use_dim = list(range(use_dim))\n assert max(use_dim) < load_dim, \\\n f'Expect all used dimensions < {load_dim}, got {use_dim}'\n assert coord_type in ['CAMERA', 'LIDAR', 'DEPTH']\n\n self.coord_type = coord_type\n self.load_dim = load_dim\n self.use_dim = use_dim\n self.file_client_args = file_client_args.copy()\n self.file_client = None\n\n def _load_points(self, pts_filename):\n \"\"\"Private function to load point clouds data.\n\n Args:\n pts_filename (str): Filename of point clouds data.\n\n Returns:\n np.ndarray: An array containing point clouds data.\n \"\"\"\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n pts_bytes = self.file_client.get(pts_filename)\n points = np.frombuffer(pts_bytes, dtype=np.float32)\n except ConnectionError:\n mmcv.check_file_exist(pts_filename)\n if pts_filename.endswith('.npy'):\n points = np.load(pts_filename)\n else:\n points = np.fromfile(pts_filename, dtype=np.float32)\n\n return points\n\n def __call__(self, results):\n \"\"\"Call function to load points data from file.\n\n Args:\n results (dict): Result dict containing point clouds data.\n\n Returns:\n dict: The result dict containing the point clouds data. 
\\\n Added key and value are described below.\n\n - points (np.ndarray): Point clouds data.\n \"\"\"\n pts_filename = results['pts_filename']\n #print(\"LKK pts_filename:\", pts_filename)\n points = self._load_points(pts_filename)\n #print(\"LKK points.shape:\", points.shape)\n #print(\"LKK use_dim:\", self.use_dim)\n points = points.reshape(-1, self.load_dim)\n points = points[:, self.use_dim]\n #print(\"LKK points after shape:\", points.shape)\n attribute_dims = None\n\n if self.shift_height:\n floor_height = np.percentile(points[:, 2], 0.99)\n height = points[:, 2] - floor_height\n points = np.concatenate([points, np.expand_dims(height, 1)], 1)\n attribute_dims = dict(height=3)\n\n points_class = get_points_type(self.coord_type)\n points = points_class(\n points, points_dim=points.shape[-1], attribute_dims=attribute_dims)\n results['points'] = points\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__ + '('\n repr_str += 'shift_height={}, '.format(self.shift_height)\n repr_str += 'file_client_args={}), '.format(self.file_client_args)\n repr_str += 'load_dim={}, '.format(self.load_dim)\n repr_str += 'use_dim={})'.format(self.use_dim)\n return repr_str\n\n\[email protected]_module()\nclass LoadAnnotations3D(LoadAnnotations):\n \"\"\"Load Annotations3D.\n\n Load instance mask and semantic mask of points and\n encapsulate the items into related fields.\n\n Args:\n with_bbox_3d (bool, optional): Whether to load 3D boxes.\n Defaults to True.\n with_label_3d (bool, optional): Whether to load 3D labels.\n Defaults to True.\n with_mask_3d (bool, optional): Whether to load 3D instance masks.\n for points. Defaults to False.\n with_seg_3d (bool, optional): Whether to load 3D semantic masks.\n for points. Defaults to False.\n with_bbox (bool, optional): Whether to load 2D boxes.\n Defaults to False.\n with_label (bool, optional): Whether to load 2D labels.\n Defaults to False.\n with_mask (bool, optional): Whether to load 2D instance masks.\n Defaults to False.\n with_seg (bool, optional): Whether to load 2D semantic masks.\n Defaults to False.\n poly2mask (bool, optional): Whether to convert polygon annotations\n to bitmasks. 
Defaults to True.\n file_client_args (dict): Config dict of file clients, refer to\n https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py\n for more details.\n \"\"\"\n\n def __init__(self,\n with_bbox_3d=True,\n with_label_3d=True,\n with_mask_3d=False,\n with_seg_3d=False,\n with_bbox=False,\n with_label=False,\n with_mask=False,\n with_seg=False,\n poly2mask=True,\n file_client_args=dict(backend='disk')):\n super().__init__(\n with_bbox,\n with_label,\n with_mask,\n with_seg,\n poly2mask,\n file_client_args=file_client_args)\n self.with_bbox_3d = with_bbox_3d\n self.with_label_3d = with_label_3d\n self.with_mask_3d = with_mask_3d\n self.with_seg_3d = with_seg_3d\n\n def _load_bboxes_3d(self, results):\n \"\"\"Private function to load 3D bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.\n\n Returns:\n dict: The dict containing loaded 3D bounding box annotations.\n \"\"\"\n results['gt_bboxes_3d'] = results['ann_info']['gt_bboxes_3d']\n results['bbox3d_fields'].append('gt_bboxes_3d')\n return results\n\n def _load_labels_3d(self, results):\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.\n\n Returns:\n dict: The dict containing loaded label annotations.\n \"\"\"\n results['gt_labels_3d'] = results['ann_info']['gt_labels_3d']\n return results\n\n def _load_masks_3d(self, results):\n \"\"\"Private function to load 3D mask annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.\n\n Returns:\n dict: The dict containing loaded 3D mask annotations.\n \"\"\"\n pts_instance_mask_path = results['ann_info']['pts_instance_mask_path']\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n mask_bytes = self.file_client.get(pts_instance_mask_path)\n pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int)\n except ConnectionError:\n mmcv.check_file_exist(pts_instance_mask_path)\n pts_instance_mask = np.fromfile(\n pts_instance_mask_path, dtype=np.long)\n\n results['pts_instance_mask'] = pts_instance_mask\n results['pts_mask_fields'].append('pts_instance_mask')\n return results\n\n def _load_semantic_seg_3d(self, results):\n \"\"\"Private function to load 3D semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.\n\n Returns:\n dict: The dict containing the semantic segmentation annotations.\n \"\"\"\n pts_semantic_mask_path = results['ann_info']['pts_semantic_mask_path']\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n mask_bytes = self.file_client.get(pts_semantic_mask_path)\n # add .copy() to fix read-only bug\n pts_semantic_mask = np.frombuffer(mask_bytes, dtype=np.int).copy()\n except ConnectionError:\n mmcv.check_file_exist(pts_semantic_mask_path)\n pts_semantic_mask = np.fromfile(\n pts_semantic_mask_path, dtype=np.long)\n\n results['pts_semantic_mask'] = pts_semantic_mask\n results['pts_seg_fields'].append('pts_semantic_mask')\n return results\n\n def __call__(self, results):\n \"\"\"Call function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.\n\n Returns:\n dict: The dict containing loaded 3D bounding box, label, mask and\n semantic segmentation annotations.\n \"\"\"\n results = super().__call__(results)\n if self.with_bbox_3d:\n results = self._load_bboxes_3d(results)\n if results is None:\n 
return None\n if self.with_label_3d:\n results = self._load_labels_3d(results)\n if self.with_mask_3d:\n results = self._load_masks_3d(results)\n if self.with_seg_3d:\n results = self._load_semantic_seg_3d(results)\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n indent_str = ' '\n repr_str = self.__class__.__name__ + '(\\n'\n repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, '\n repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, '\n repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, '\n repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, '\n repr_str += f'{indent_str}with_bbox={self.with_bbox}, '\n repr_str += f'{indent_str}with_label={self.with_label}, '\n repr_str += f'{indent_str}with_mask={self.with_mask}, '\n repr_str += f'{indent_str}with_seg={self.with_seg}, '\n repr_str += f'{indent_str}poly2mask={self.poly2mask})'\n return repr_str\n"
] |
[
[
"numpy.fromfile",
"numpy.expand_dims",
"numpy.abs",
"numpy.arange",
"numpy.percentile",
"numpy.ones",
"numpy.frombuffer",
"numpy.copy",
"numpy.load",
"numpy.array",
"numpy.logical_and",
"numpy.zeros"
]
] |
AmazingDD/IF4RecSys
|
[
"91b93fa53e7d9b8f5cb78afcd413ad53fe7f16ad"
] |
[
"src/scalability/offline.py"
] |
[
"import pickle\r\nimport pandas as pd\r\nfrom surprise import SVD, Reader, Dataset\r\n\r\nprint('Now offline-train Amazon')\r\ndf = pd.read_csv('./exp_data/amazon_exp.csv')\r\n\r\nreader = Reader()\r\ndata = Dataset.load_from_df(\r\n df=df[['user_id', 'item_id', 'rating']], reader=reader, rating_scale=(1, 5))\r\ntrain_set = data.build_full_trainset()\r\n\r\nraw_ratings = [(uid, iid, float(r)) for (uid, iid, r, time) in df.itertuples(index=False)]\r\nraw2inner_id_users = {}\r\nraw2inner_id_items = {}\r\ncurrent_u_index = 0\r\ncurrent_i_index = 0\r\nfor urid, irid, r in raw_ratings:\r\n try:\r\n uid = raw2inner_id_users[urid]\r\n except KeyError:\r\n uid = current_u_index\r\n raw2inner_id_users[urid] = current_u_index\r\n current_u_index += 1\r\n try:\r\n iid = raw2inner_id_items[irid]\r\n except:\r\n iid = current_i_index\r\n raw2inner_id_items[irid] = current_i_index\r\n current_i_index += 1\r\nuser_dict = {val: key for key, val in raw2inner_id_users.items()}\r\nitem_dict = {val: key for key, val in raw2inner_id_items.items()}\r\n\r\npickle.dump(user_dict, open('./offline_result/user_dict.amazon', 'wb'))\r\npickle.dump(item_dict, open('./offline_result/item_dict.amazon', 'wb'))\r\n\r\n\r\n\r\n\r\nalgo = SVD(biased=False)\r\nalgo.fit(train_set)\r\nP = algo.pu\r\nQ = algo.qi\r\n\r\namazon_rate_dict = dict()\r\nfor u, i, r in train_set.all_ratings():\r\n amazon_rate_dict[(u, i)] = r\r\n\r\npickle.dump(P, open('./offline_result/ofl_P.amazon', 'wb'))\r\npickle.dump(Q, open('./offline_result/ofl_Q.amazon', 'wb'))\r\npickle.dump(amazon_rate_dict, open('./offline_result/ofl_R.amazon', 'wb'))\r\n\r\nprint('Now offline-train Yelp')\r\ndf = pd.read_csv('./exp_data/yelp_exp.csv')\r\n\r\nreader = Reader()\r\ndata = Dataset.load_from_df(\r\n df=df[['user_id', 'item_id', 'rating']], reader=reader, rating_scale=(1, 5))\r\ntrain_set = data.build_full_trainset()\r\n\r\nraw_ratings = [(uid, iid, float(r)) for (uid, iid, r, time) in df.itertuples(index=False)]\r\nraw2inner_id_users = {}\r\nraw2inner_id_items = {}\r\ncurrent_u_index = 0\r\ncurrent_i_index = 0\r\nfor urid, irid, r in raw_ratings:\r\n try:\r\n uid = raw2inner_id_users[urid]\r\n except KeyError:\r\n uid = current_u_index\r\n raw2inner_id_users[urid] = current_u_index\r\n current_u_index += 1\r\n try:\r\n iid = raw2inner_id_items[irid]\r\n except:\r\n iid = current_i_index\r\n raw2inner_id_items[irid] = current_i_index\r\n current_i_index += 1\r\nuser_dict = {val: key for key, val in raw2inner_id_users.items()}\r\nitem_dict = {val: key for key, val in raw2inner_id_items.items()}\r\n\r\npickle.dump(user_dict, open('./offline_result/user_dict.yelp', 'wb'))\r\npickle.dump(item_dict, open('./offline_result/item_dict.yelp', 'wb'))\r\n\r\nalgo = SVD(biased=False)\r\nalgo.fit(train_set)\r\nP = algo.pu\r\nQ = algo.qi\r\n\r\nyelp_rate_dict = dict()\r\nfor u, i, r in train_set.all_ratings():\r\n yelp_rate_dict[(u, i)] = r\r\n\r\npickle.dump(P, open('./offline_result/ofl_P.yelp', 'wb'))\r\npickle.dump(Q, open('./offline_result/ofl_Q.yelp', 'wb'))\r\npickle.dump(yelp_rate_dict, open('./offline_result/ofl_R.yelp', 'wb'))\r\n"
] |
[
[
"pandas.read_csv"
]
] |
hadivafaii/network-portrait-divergence
|
[
"b6fdfbb607ffadbd60dccc2f7ae306351611a1a4"
] |
[
"example_use.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# example_use.py\n# Jim Bagrow\n# Last Modified: 2018-04-22\n\nimport sys, os\nimport itertools\nimport networkx as nx\nimport numpy as np\nfrom portrait_divergence import portrait_divergence\n\n\n# make n ER graphs and n BA graphs:\nn = 10\nlist_ER = [ nx.erdos_renyi_graph(100, 3/99) for _ in range(n) ]\nlist_BA = [ nx.barabasi_albert_graph(100, 3) for _ in range(n) ]\n\n\n# compare every pair of ER graphs:\nDjs_sameER = []\nfor ERi, ERj in itertools.combinations(list_ER, 2):\n Djs = portrait_divergence(ERi, ERj)\n Djs_sameER.append(Djs)\n\n# compare every pair of BA graphs:\nDjs_sameBA = []\nfor BAi, BAj in itertools.combinations(list_BA, 2):\n Djs = portrait_divergence(BAi, BAj)\n Djs_sameBA.append(Djs)\n\n# compare every ER with every BA:\nDjs_ERvBA = []\nfor ER, BA in itertools.product(list_ER, list_BA):\n Djs = portrait_divergence(ER, BA)\n Djs_ERvBA.append(Djs)\n\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n sys.exit(0)\n\n# plot histograms:\nhargs = dict(bins='auto', density=True, histtype='stepfilled')\nplt.hist(Djs_sameER, label='Same ER', alpha=0.7, **hargs)\nplt.hist(Djs_sameBA, label='Same BA', alpha=0.6, **hargs)\nplt.hist(Djs_ERvBA, label='ER vs. BA', alpha=0.7, **hargs)\n\nplt.xlabel(\"Portrait divergence $D_\\mathrm{JS}$\")\nplt.ylabel(\"Prob. density\")\nplt.legend()\nplt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
Lee-zix/MARLPaR
|
[
"79269139709ade3d7299eef91cfd6d19bb9e33b3"
] |
[
"code/model/trainer.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom tqdm import tqdm\nimport json\nimport time\nimport os\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom code.model.agent import Agent\nfrom code.options import read_options\nfrom code.model.environment import env\nimport codecs\nfrom collections import defaultdict\nimport gc\nimport resource\nimport sys\nfrom code.model.baseline import ReactiveBaseline\nfrom code.model.nell_eval import nell_eval\nfrom scipy.misc import logsumexp as lse\n\nlogger = logging.getLogger()\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\nclass Trainer(object):\n def __init__(self, params):\n\n # transfer parameters to self\n for key, val in params.items(): setattr(self, key, val);\n\n self.agent = Agent(params)\n self.save_path = None\n self.train_environment = env(params, 'train')\n self.dev_test_environment = env(params, 'dev')\n self.test_test_environment = env(params, 'test')\n self.test_environment = self.dev_test_environment\n self.rev_relation_vocab = self.train_environment.grapher.rev_relation_vocab\n self.rev_entity_vocab = self.train_environment.grapher.rev_entity_vocab\n self.max_hits_at_10 = 0\n self.ePAD = self.entity_vocab['PAD']\n self.rPAD = self.relation_vocab['PAD']\n # optimize\n self.baseline = ReactiveBaseline(l=self.Lambda)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n\n\n def calc_reinforce_loss(self):\n loss = tf.stack(self.per_example_loss, axis=1) # [B, T]\n\n self.tf_baseline = self.baseline.get_baseline_value()\n # self.pp = tf.Print(self.tf_baseline)\n # multiply with rewards\n final_reward = self.cum_discounted_reward - self.tf_baseline\n # reward_std = tf.sqrt(tf.reduce_mean(tf.square(final_reward))) + 1e-5 # constant addded for numerical stability\n reward_mean, reward_var = tf.nn.moments(final_reward, axes=[0, 1]) # Calculate\n # statistical moments;\n # Constant added for numerical stability\n reward_std = tf.sqrt(reward_var) + 1e-6\n final_reward = tf.div(final_reward - reward_mean, reward_std)\n\n loss = tf.multiply(loss, final_reward) # [B, T] Returns x * y element-wise\n self.loss_before_reg = loss\n\n total_loss = tf.reduce_mean(loss) - self.decaying_beta * self.entropy_reg_loss(self.per_example_logits) # scalar\n\n return total_loss\n\n # Loss for entity selection, changing decaying_beta loss\n def calc_reinforce_ent_loss(self):\n loss = tf.stack(self.per_example_ent_loss, axis=1) # [B, T]\n\n self.tf_baseline = self.baseline.get_baseline_value()\n # self.pp = tf.Print(self.tf_baseline)\n # multiply with rewards\n final_reward = self.cum_discounted_reward - self.tf_baseline\n # reward_std = tf.sqrt(tf.reduce_mean(tf.square(final_reward))) + 1e-5 # constant addded for numerical stability\n reward_mean, reward_var = tf.nn.moments(final_reward, axes=[0, 1])\n # Constant added for numerical stability\n reward_std = tf.sqrt(reward_var) + 1e-6\n final_reward = tf.div(final_reward - reward_mean, reward_std)\n\n loss = tf.multiply(loss, final_reward) # [B, T] Returns x * y element-wise\n self.loss_before_reg = loss\n\n total_loss = tf.reduce_mean(loss) - self.decaying_beta * self.entropy_reg_loss(self.per_example_ent_logits) # scalar\n\n return total_loss\n\n def entropy_reg_loss(self, all_logits):\n all_logits = tf.stack(all_logits, axis=2) # [B, MAX_NUM_ACTIONS, T]\n entropy_policy = - tf.reduce_mean(tf.reduce_sum(tf.multiply(tf.exp(all_logits), all_logits), axis=1)) # scalar\n return entropy_policy\n\n def initialize(self, restore=None, sess=None): 
# construct cal graph\n\n logger.info(\"Creating TF graph...\")\n self.candidate_relation_weight_sequence = []\n self.candidate_relation_sequence = []\n self.candidate_entity_sequence = []\n self.input_path = []\n self.first_state_of_test = tf.placeholder(tf.bool, name=\"is_first_state_of_test\")\n self.query_relation = tf.placeholder(tf.int32, [None], name=\"query_relation\")\n self.range_arr = tf.placeholder(tf.int32, shape=[None, ])\n self.global_step = tf.Variable(0, trainable=False)\n ## beta --Base learning rate self.global_step --Current index into the dataset. 200\n # --Decay step 0.90 --Decay rate\n self.decaying_beta = tf.train.exponential_decay(self.beta, self.global_step,\n 200, 0.90, staircase=False)\n self.entity_sequence = []\n\n # to feed in the discounted reward tensor\n self.cum_discounted_reward = tf.placeholder(tf.float32, [None, self.path_length],\n name=\"cumulative_discounted_reward\")\n\n self.cum_discounted_reward_ent = tf.placeholder(tf.float32, [None, self.path_length],\n name=\"cumulative_discounted_reward_ent\")\n\n for t in range(self.path_length):\n next_possible_relations = tf.placeholder(tf.int32, [None, self.max_num_actions], #batchsize * max_num_actions\n name=\"next_relations_{}\".format(t))\n next_possible_relations_weight = tf.placeholder(tf.float32, [None, self.max_num_actions],\n # batchsize * max_num_actions\n name=\"next_relations_{}\".format(t))\n next_possible_entities = tf.placeholder(tf.int32, [None, self.max_num_actions], #batchsize * max_num_actions\n name=\"next_entities_{}\".format(t))\n input_label_relation = tf.placeholder(tf.int32, [None], name=\"input_label_relation_{}\".format(t)) #batchsize\n start_entities = tf.placeholder(tf.int32, [None, ]) #batchsize\n self.input_path.append(input_label_relation) #t * batchsize\n self.candidate_relation_sequence.append(next_possible_relations) #t * batchsize * max_num_actions\n self.candidate_relation_weight_sequence.append(next_possible_relations_weight)\n self.candidate_entity_sequence.append(next_possible_entities) #t * batchsize * max_num_actions\n self.entity_sequence.append(start_entities)\n self.loss_before_reg = tf.constant(0.0)\n self.per_example_loss, self.per_example_ent_loss, self.per_example_logits, self.per_example_ent_logits, self.action_idx = self.agent(\n self.candidate_relation_weight_sequence,\n self.candidate_relation_sequence,\n self.candidate_entity_sequence, self.entity_sequence,\n self.input_path,\n self.query_relation, self.range_arr, self.first_state_of_test, self.path_length)\n\n\n self.loss_op = self.calc_reinforce_loss()\n\n self.loss_ent_op = self.calc_reinforce_ent_loss()\n\n # backprop\n self.train_op = self.bp(self.loss_op)\n\n self.train_op_ent = self.bp_ent(self.loss_ent_op)\n\n # Building the test graph\n self.prev_state = tf.placeholder(tf.float32, self.agent.get_mem_shape(), name=\"memory_of_agent\")\n self.prev_relation = tf.placeholder(tf.int32, [None, ], name=\"previous_relation\")\n self.query_embedding = tf.nn.embedding_lookup(self.agent.relation_lookup_table, self.query_relation) # [B, 2D]\n layer_state = tf.unstack(self.prev_state, self.LSTM_layers)\n formated_state = [tf.unstack(s, 2) for s in layer_state]\n self.next_relations = tf.placeholder(tf.int32, shape=[None, self.max_num_actions])\n self.next_relations_weight = tf.placeholder(tf.float32, shape=[None, self.max_num_actions])\n self.next_entities = tf.placeholder(tf.int32, shape=[None, self.max_num_actions])\n\n self.current_entities = tf.placeholder(tf.int32, shape=[None,])\n\n\n\n with 
tf.variable_scope(\"policy_steps_unroll\") as scope:\n scope.reuse_variables()\n self.test_loss, self.test_ent_loss, test_state, self.test_logits, self.test_ent_logits, self.test_action_idx, self.chosen_relation = self.agent.step(self.next_relations_weight,\n self.next_relations, self.next_entities, formated_state, self.prev_relation, self.query_embedding,\n self.current_entities, self.input_path[0], self.range_arr, self.first_state_of_test)\n self.test_state = tf.stack(test_state)\n\n logger.info('TF Graph creation done..')\n self.model_saver = tf.train.Saver(max_to_keep=2)\n\n # return the variable initializer Op.\n if not restore:\n return tf.global_variables_initializer()\n else:\n return self.model_saver.restore(sess, restore)\n\n\n\n def initialize_pretrained_embeddings(self, sess):\n if self.pretrained_embeddings_action != '':\n embeddings = np.loadtxt(open(self.pretrained_embeddings_action))\n _ = sess.run((self.agent.relation_embedding_init),\n feed_dict={self.agent.action_embedding_placeholder: embeddings})\n if self.pretrained_embeddings_entity != '':\n embeddings = np.loadtxt(open(self.pretrained_embeddings_entity))\n _ = sess.run((self.agent.entity_embedding_init),\n feed_dict={self.agent.entity_embedding_placeholder: embeddings})\n\n def bp(self, cost):\n self.baseline.update(tf.reduce_mean(self.cum_discounted_reward))\n tvars = tf.trainable_variables()\n grads = tf.gradients(cost, tvars)\n grads, _ = tf.clip_by_global_norm(grads, self.grad_clip_norm)\n train_op = self.optimizer.apply_gradients(zip(grads, tvars))\n with tf.control_dependencies([train_op]): # see https://github.com/tensorflow/tensorflow/issues/1899\n self.dummy = tf.constant(0)\n return train_op\n\n def bp_ent(self, cost):\n self.baseline.update(tf.reduce_mean(self.cum_discounted_reward))\n tvars = tf.trainable_variables()\n grads = tf.gradients(cost, tvars)\n grads, _ = tf.clip_by_global_norm(grads, self.grad_clip_norm)\n train_op = self.optimizer.apply_gradients(zip(grads, tvars))\n with tf.control_dependencies([train_op]): # see https://github.com/tensorflow/tensorflow/issues/1899\n self.dummy1 = tf.constant(0)\n return train_op\n\n\n\n def calc_cum_discounted_reward(self, rewards):\n \"\"\"\n calculates the cumulative discounted reward.\n :param rewards:\n :param T:\n :param gamma:\n :return:\n \"\"\"\n running_add = np.zeros([rewards.shape[0]]) # [B]\n cum_disc_reward = np.zeros([rewards.shape[0], self.path_length]) # [B, T]\n cum_disc_reward[:,self.path_length - 1] = rewards # set the last time step to the reward received at the last state\n for t in reversed(range(self.path_length)):\n running_add = self.gamma * running_add + cum_disc_reward[:, t]\n cum_disc_reward[:, t] = running_add\n return cum_disc_reward\n\n def gpu_io_setup(self):\n # create fetches for partial_run_setup\n fetches = self.per_example_loss + self.per_example_ent_loss + self.action_idx + [self.loss_op] + self.per_example_logits + self.per_example_ent_logits + [self.loss_ent_op]+ [self.dummy] + [self.dummy1]\n feeds = [self.first_state_of_test] + self.candidate_relation_weight_sequence + self.candidate_relation_sequence+ self.candidate_entity_sequence + self.input_path + \\\n [self.query_relation] + [self.cum_discounted_reward] + [self.range_arr] + self.entity_sequence\n\n\n feed_dict = [{} for _ in range(self.path_length)]\n\n feed_dict[0][self.first_state_of_test] = False\n feed_dict[0][self.query_relation] = None\n feed_dict[0][self.range_arr] = np.arange(self.batch_size*self.num_rollouts)\n for i in range(self.path_length):\n 
feed_dict[i][self.input_path[i]] = np.zeros(self.batch_size * self.num_rollouts) # placebo\n feed_dict[i][self.candidate_relation_weight_sequence[i]] = None\n feed_dict[i][self.candidate_relation_sequence[i]] = None\n feed_dict[i][self.candidate_entity_sequence[i]] = None\n feed_dict[i][self.entity_sequence[i]] = None\n\n return fetches, feeds, feed_dict\n\n def train(self, sess):\n # import pdb\n # pdb.set_trace()\n fetches, feeds, feed_dict = self.gpu_io_setup()\n\n train_loss = 0.0\n start_time = time.time()\n self.batch_counter = 0\n for episode in self.train_environment.get_episodes():\n\n self.batch_counter += 1\n h = sess.partial_run_setup(fetches=fetches, feeds=feeds)\n feed_dict[0][self.query_relation] = episode.get_query_relation()\n\n # get initial state\n state = episode.get_state()\n # for each time step\n loss_before_regularization = []\n loss_before_regularization_ent = []\n logits = []\n logits_ent = []\n for i in range(self.path_length):\n feed_dict[i][self.candidate_relation_weight_sequence[i]] = state['next_relations_weight']\n feed_dict[i][self.candidate_relation_sequence[i]] = state['next_relations']\n feed_dict[i][self.candidate_entity_sequence[i]] = state['next_entities']\n feed_dict[i][self.entity_sequence[i]] = state['current_entities']\n per_example_loss, per_example_ent_loss, per_example_logits, per_example_ent_logits, idx = sess.partial_run(h, [self.per_example_loss[i], self.per_example_ent_loss[i], self.per_example_logits[i], self.per_example_ent_logits[i], self.action_idx[i]],\n feed_dict=feed_dict[i])\n loss_before_regularization.append(per_example_loss)\n loss_before_regularization_ent.append(per_example_ent_loss)\n logits.append(per_example_logits)\n logits_ent.append(per_example_ent_logits)\n # action = np.squeeze(action, axis=1) # [B,]\n state = episode(idx)\n loss_before_regularization = np.stack(loss_before_regularization, axis=1)\n loss_before_regularization_ent = np.stack(loss_before_regularization_ent, axis=1)\n\n # get the final reward from the environment\n rewards = episode.get_reward()\n\n # computed cumulative discounted reward\n cum_discounted_reward = self.calc_cum_discounted_reward(rewards) # [B, T]\n\n\n\n # backprop\n batch_total_loss, _ = sess.partial_run(h, [self.loss_op, self.dummy],\n feed_dict={self.cum_discounted_reward: cum_discounted_reward})\n\n # backprop2\n #batch_total_ent_loss, _ = sess.partial_run(h, [self.loss_ent_op,self.dummy1 ],feed_dict=None)\n\n # print statistics\n train_loss = 0.98 * train_loss + 0.02 * batch_total_loss\n avg_reward = np.mean(rewards)\n # now reshape the reward to [orig_batch_size, num_rollouts], I want to calculate for how many of the\n # entity pair, atleast one of the path get to the right answer\n reward_reshape = np.reshape(rewards, (self.batch_size, self.num_rollouts)) # [orig_batch, num_rollouts]\n reward_reshape = np.sum(reward_reshape, axis=1) # [orig_batch]\n reward_reshape = (reward_reshape > 0)\n num_ep_correct = np.sum(reward_reshape)\n if np.isnan(train_loss):\n raise ArithmeticError(\"Error in computing loss\")\n\n logger.info(\"batch_counter: {0:4d}, num_hits: {1:7.4f}, avg. 
reward per batch {2:7.4f}, \"\n \"num_ep_correct {3:4d}, avg_ep_correct {4:7.4f}, train loss {5:7.4f}\".\n format(self.batch_counter, np.sum(rewards), avg_reward, num_ep_correct,\n (num_ep_correct / self.batch_size),\n train_loss))\n\n if self.batch_counter%self.eval_every == 0:\n with open(self.output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Score for iteration \" + str(self.batch_counter) + \"\\n\")\n os.mkdir(self.path_logger_file + \"/\" + str(self.batch_counter))\n self.path_logger_file_ = self.path_logger_file + \"/\" + str(self.batch_counter) + \"/paths\"\n\n\n self.test_environment = self.dev_test_environment\n self.test(sess, beam=True, print_paths=False)\n self.test_environment = self.test_test_environment\n self.test(sess, beam=True, print_paths=False)\n\n logger.info('Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n\n gc.collect()\n if self.batch_counter >= self.total_iterations:\n break\n\n def test(self, sess, beam=False, print_paths=False, save_model = True, auc = False):\n batch_counter = 0\n paths = defaultdict(list)\n answers = []\n feed_dict = {}\n all_final_reward_1 = 0\n all_final_reward_3 = 0\n all_final_reward_5 = 0\n all_final_reward_10 = 0\n all_final_reward_20 = 0\n auc = 0\n\n total_examples = self.test_environment.total_no_examples\n for episode in tqdm(self.test_environment.get_episodes()):\n batch_counter += 1\n\n temp_batch_size = episode.no_examples\n\n self.qr = episode.get_query_relation()\n feed_dict[self.query_relation] = self.qr\n # set initial beam probs\n beam_probs = np.zeros((temp_batch_size * self.test_rollouts, 1))\n # get initial state\n state = episode.get_state()\n mem = self.agent.get_mem_shape()\n agent_mem = np.zeros((mem[0], mem[1], temp_batch_size*self.test_rollouts, mem[3]) ).astype('float32')\n previous_relation = np.ones((temp_batch_size * self.test_rollouts, ), dtype='int64') * self.relation_vocab[\n 'DUMMY_START_RELATION']\n feed_dict[self.range_arr] = np.arange(temp_batch_size * self.test_rollouts)\n feed_dict[self.input_path[0]] = np.zeros(temp_batch_size * self.test_rollouts)\n\n ####logger code####\n if print_paths:\n self.entity_trajectory = []\n self.relation_trajectory = []\n ####################\n\n self.log_probs = np.zeros((temp_batch_size*self.test_rollouts,)) * 1.0\n\n # for each time step\n for i in range(self.path_length):\n if i == 0:\n feed_dict[self.first_state_of_test] = True\n feed_dict[self.next_relations] = state['next_relations']\n feed_dict[self.next_relations_weight] = state['next_relations_weight']\n feed_dict[self.next_entities] = state['next_entities']\n feed_dict[self.current_entities] = state['current_entities']\n feed_dict[self.prev_state] = agent_mem\n feed_dict[self.prev_relation] = previous_relation\n\n loss, agent_mem, test_scores, test_action_idx, chosen_relation = sess.run(\n [ self.test_loss, self.test_state, self.test_logits, self.test_action_idx, self.chosen_relation],\n feed_dict=feed_dict)\n\n\n if beam:\n k = self.test_rollouts\n new_scores = test_scores + beam_probs\n if i == 0:\n idx = np.argsort(new_scores)\n idx = idx[:, -k:]\n ranged_idx = np.tile([b for b in range(k)], temp_batch_size)\n idx = idx[np.arange(k*temp_batch_size), ranged_idx]\n else:\n idx = self.top_k(new_scores, k)\n\n y = idx//self.max_num_actions\n x = idx%self.max_num_actions\n\n y += np.repeat([b*k for b in range(temp_batch_size)], k)\n state['current_entities'] = state['current_entities'][y]\n state['next_relations'] = state['next_relations'][y,:]\n 
state['next_relations_weight'] = state['next_relations_weight'][y, :]\n state['next_entities'] = state['next_entities'][y, :]\n agent_mem = agent_mem[:, :, y, :]\n test_action_idx = x\n chosen_relation = state['next_relations'][np.arange(temp_batch_size*k), x]\n beam_probs = new_scores[y, x]\n beam_probs = beam_probs.reshape((-1, 1))\n if print_paths:\n for j in range(i):\n self.entity_trajectory[j] = self.entity_trajectory[j][y]\n self.relation_trajectory[j] = self.relation_trajectory[j][y]\n previous_relation = chosen_relation\n\n ####logger code####\n if print_paths:\n self.entity_trajectory.append(state['current_entities'])\n self.relation_trajectory.append(chosen_relation)\n ####################\n state = episode(test_action_idx)\n self.log_probs += test_scores[np.arange(self.log_probs.shape[0]), test_action_idx]\n if beam:\n self.log_probs = beam_probs\n\n ####Logger code####\n\n if print_paths:\n self.entity_trajectory.append(\n state['current_entities'])\n\n\n # ask environment for final reward\n rewards = episode.get_reward() # [B*test_rollouts]\n reward_reshape = np.reshape(rewards, (temp_batch_size, self.test_rollouts)) # [orig_batch, test_rollouts]\n self.log_probs = np.reshape(self.log_probs, (temp_batch_size, self.test_rollouts))\n sorted_indx = np.argsort(-self.log_probs)\n final_reward_1 = 0\n final_reward_3 = 0\n final_reward_5 = 0\n final_reward_10 = 0\n final_reward_20 = 0\n AP = 0\n ce = episode.state['current_entities'].reshape((temp_batch_size, self.test_rollouts))\n se = episode.start_entities.reshape((temp_batch_size, self.test_rollouts))\n for b in range(temp_batch_size):\n answer_pos = None\n seen = set()\n pos=0\n if self.pool == 'max':\n for r in sorted_indx[b]:\n if reward_reshape[b,r] == self.positive_reward:\n answer_pos = pos\n break\n if ce[b, r] not in seen:\n seen.add(ce[b, r])\n pos += 1\n if self.pool == 'sum':\n scores = defaultdict(list)\n answer = ''\n for r in sorted_indx[b]:\n scores[ce[b,r]].append(self.log_probs[b,r])\n if reward_reshape[b,r] == self.positive_reward:\n answer = ce[b,r]\n final_scores = defaultdict(float)\n for e in scores:\n final_scores[e] = lse(scores[e])\n sorted_answers = sorted(final_scores, key=final_scores.get, reverse=True)\n if answer in sorted_answers:\n answer_pos = sorted_answers.index(answer)\n else:\n answer_pos = None\n\n\n if answer_pos != None:\n if answer_pos < 20:\n final_reward_20 += 1\n if answer_pos < 10:\n final_reward_10 += 1\n if answer_pos < 5:\n final_reward_5 += 1\n if answer_pos < 3:\n final_reward_3 += 1\n if answer_pos < 1:\n final_reward_1 += 1\n if answer_pos == None:\n AP += 0\n else:\n AP += 1.0/((answer_pos+1))\n if print_paths:\n qr = self.train_environment.grapher.rev_relation_vocab[self.qr[b * self.test_rollouts]]\n start_e = self.rev_entity_vocab[episode.start_entities[b * self.test_rollouts]]\n end_e = self.rev_entity_vocab[episode.end_entities[b * self.test_rollouts]]\n paths[str(qr)].append(str(start_e) + \"\\t\" + str(end_e) + \"\\n\")\n paths[str(qr)].append(\"Reward:\" + str(1 if answer_pos != None and answer_pos < 10 else 0) + \"\\n\")\n for r in sorted_indx[b]:\n indx = b * self.test_rollouts + r\n if rewards[indx] == self.positive_reward:\n rev = 1\n else:\n rev = -1\n answers.append(self.rev_entity_vocab[se[b,r]]+'\\t'+ self.rev_entity_vocab[ce[b,r]]+'\\t'+ str(self.log_probs[b,r])+'\\n')\n paths[str(qr)].append(\n '\\t'.join([str(self.rev_entity_vocab[e[indx]]) for e in\n self.entity_trajectory]) + '\\n' + '\\t'.join(\n [str(self.rev_relation_vocab[re[indx]]) for re in 
self.relation_trajectory]) + '\\n' + str(\n rev) + '\\n' + str(\n self.log_probs[b, r]) + '\\n___' + '\\n')\n paths[str(qr)].append(\"#####################\\n\")\n\n all_final_reward_1 += final_reward_1\n all_final_reward_3 += final_reward_3\n all_final_reward_5 += final_reward_5\n all_final_reward_10 += final_reward_10\n all_final_reward_20 += final_reward_20\n auc += AP\n\n all_final_reward_1 /= total_examples\n all_final_reward_3 /= total_examples\n all_final_reward_5 /= total_examples\n all_final_reward_10 /= total_examples\n all_final_reward_20 /= total_examples\n auc /= total_examples\n if save_model:\n if auc >= self.max_hits_at_10:\n self.max_hits_at_10 = all_final_reward_10\n self.save_path = self.model_saver.save(sess, self.model_dir + \"model\" + '.ckpt')\n\n if print_paths:\n logger.info(\"[ printing paths at {} ]\".format(self.output_dir+'/test_beam/'))\n for q in paths:\n j = q.replace('/', '-')\n with codecs.open(self.path_logger_file_ + '_' + j, 'a', 'utf-8') as pos_file:\n for p in paths[q]:\n pos_file.write(p)\n with open(self.path_logger_file_ + 'answers', 'w') as answer_file:\n for a in answers:\n answer_file.write(a)\n '''\n with open(self.output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Hits@1: {0:7.4f}\".format(all_final_reward_1))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@3: {0:7.4f}\".format(all_final_reward_3))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@5: {0:7.4f}\".format(all_final_reward_5))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@10: {0:7.4f}\".format(all_final_reward_10))\n score_file.write(\"\\n\")\n score_file.write(\"Hits@20: {0:7.4f}\".format(all_final_reward_20))\n score_file.write(\"\\n\")\n score_file.write(\"auc: {0:7.4f}\".format(auc))\n score_file.write(\"\\n\")\n score_file.write(\"\\n\")\n '''\n logger.info(\"Hits@1: {0:7.4f}\".format(all_final_reward_1))\n logger.info(\"Hits@3: {0:7.4f}\".format(all_final_reward_3))\n #logger.info(\"Hits@5: {0:7.4f}\".format(all_final_reward_5))\n logger.info(\"Hits@10: {0:7.4f}\".format(all_final_reward_10))\n logger.info(\"Hits@20: {0:7.4f}\".format(all_final_reward_20))\n #logger.info(\"auc: {0:7.4f}\".format(auc))\n\n def top_k(self, scores, k):\n scores = scores.reshape(-1, k * self.max_num_actions) # [B, (k*max_num_actions)]\n idx = np.argsort(scores, axis=1)\n idx = idx[:, -k:] # take the last k highest indices # [B , k]\n return idx.reshape((-1))\n\nif __name__ == '__main__':\n\n # read command line options\n options = read_options()\n # Set logging\n logger.setLevel(logging.INFO)\n fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',\n '%m/%d/%Y %I:%M:%S %p')\n console = logging.StreamHandler()\n console.setFormatter(fmt)\n logger.addHandler(console)\n logfile = logging.FileHandler(options['log_file_name'], 'w')\n logfile.setFormatter(fmt)\n logger.addHandler(logfile)\n # read the vocab files, it will be used by many classes hence global scope\n logger.info('reading vocab files...')\n options['relation_vocab'] = json.load(open(options['vocab_dir'] + '/relation_vocab.json'))\n options['entity_vocab'] = json.load(open(options['vocab_dir'] + '/entity_vocab.json'))\n logger.info('Reading mid to name map')\n mid_to_word = {}\n # with open('/iesl/canvas/rajarshi/data/RL-Path-RNN/FB15k-237/fb15k_names', 'r') as f:\n # mid_to_word = json.load(f)\n logger.info('Done..')\n logger.info('Total number of entities {}'.format(len(options['entity_vocab'])))\n logger.info('Total number of relations {}'.format(len(options['relation_vocab'])))\n save_path = ''\n 
config = tf.ConfigProto()\n config.gpu_options.allow_growth = False\n config.log_device_placement = False\n\n\n #Training\n if not options['load_model']:\n trainer = Trainer(options)\n with tf.Session(config=config) as sess:\n sess.run(trainer.initialize())\n trainer.initialize_pretrained_embeddings(sess=sess)\n\n trainer.train(sess)\n save_path = trainer.save_path\n path_logger_file = trainer.path_logger_file\n output_dir = trainer.output_dir\n\n tf.reset_default_graph()\n #Testing on test with best model\n else:\n logger.info(\"Skipping training\")\n logger.info(\"Loading model from {}\".format(options[\"model_load_dir\"]))\n\n trainer = Trainer(options)\n if options['load_model']:\n save_path = options['model_load_dir']\n path_logger_file = trainer.path_logger_file\n output_dir = trainer.output_dir\n with tf.Session(config=config) as sess:\n trainer.initialize(restore=save_path, sess=sess)\n\n trainer.test_rollouts = 100\n\n os.mkdir(path_logger_file + \"/\" + \"test_beam\")\n trainer.path_logger_file_ = path_logger_file + \"/\" + \"test_beam\" + \"/paths\"\n with open(output_dir + '/scores.txt', 'a') as score_file:\n score_file.write(\"Test (beam) scores with best model from \" + save_path + \"\\n\")\n trainer.test_environment = trainer.test_test_environment\n trainer.test_environment.test_rollouts = 100\n\n trainer.test(sess, beam=True, print_paths=True, save_model=False)\n\n # trainer.test_environment = trainer.dev_test_environment\n # trainer.test(sess, beam=True, print_paths=True, save_model=False)\n print (options['nell_evaluation'])\n if options['nell_evaluation'] == 1:\n nell_eval(path_logger_file + \"/\" + \"test_beam/\" + \"pathsanswers\", trainer.data_input_dir+'/sort_test.pairs' )\n\n"
] |
[
[
"tensorflow.control_dependencies",
"tensorflow.stack",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.Variable",
"numpy.reshape",
"numpy.arange",
"tensorflow.nn.moments",
"tensorflow.gradients",
"numpy.stack",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.div",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"numpy.zeros",
"tensorflow.unstack",
"numpy.isnan",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"numpy.argsort",
"tensorflow.nn.embedding_lookup",
"numpy.sum",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.reduce_mean",
"numpy.ones",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"scipy.misc.logsumexp"
]
] |
ybbaigo/tensor2tensor
|
[
"211c8245bb4303a6a2519fa570ae170b26c99801"
] |
[
"tensor2tensor/layers/common_attention_test.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for common attention.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport kfac\nimport numpy as np\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import test_utils\n\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\n\nclass CommonAttentionTest(parameterized.TestCase, tf.test.TestCase):\n\n @test_utils.run_in_graph_and_eager_modes()\n def testAddPositionalEmbedding(self):\n x = np.random.rand(5, 3, 12)\n y = common_attention.add_positional_embedding(\n tf.constant(x, dtype=tf.float32),\n max_length=4,\n name=\"pos_embedding\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 3, 12))\n\n @parameterized.parameters(\n {\"input_shape\": (5, 3, 12)},\n {\"input_shape\": (5, 5, 5, 12)},\n {\"input_shape\": (5, 3, 3, 3, 12)},\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testAddPositionalEmbeddingNd(self, input_shape):\n x = np.random.rand(*input_shape)\n y = common_attention.add_positional_embedding_nd(\n tf.constant(x, dtype=tf.float32),\n max_length=5,\n name=\"pos_embedding\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, input_shape)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductAttention(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_attention(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32), None)\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 1, 2),\n (\"dynamic_batch\", None, 1, 8, 4, 1, 2),\n (\"batches\", 4, 3, 8, 4, 1, 2),\n (\"depth_v\", 1, 1, 8, 4, 3, 2),\n (\"block_length\", 1, 1, 8, 4, 1, 4),\n )\n def testMaskedWithinBlockLocalAttention1D(self, batch, heads, length,\n depth_k, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_within_block_local_attention_1d(\n q, k, v, block_length=block_length)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 1, 2),\n (\"dynamic_batch\", None, 1, 8, 4, 1, 2),\n (\"batches\", 4, 3, 8, 4, 1, 2),\n (\"depth_v\", 1, 1, 8, 4, 3, 2),\n (\"block_length\", 1, 1, 8, 4, 1, 4),\n )\n def testMaskedLocalAttention1D(self, 
batch, heads, length, depth_k, depth_v,\n block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_local_attention_1d(\n q, k, v, block_length=block_length)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 4, (2, 2)),\n (\"dynamic_batch\", None, 1, 8, 4, 4, (2, 2)),\n (\"batches\", 3, 2, 8, 4, 4, (2, 2)),\n # TODO(trandustin): Extend function to enable depth_k != depth_v.\n # (\"depth_v\", 1, 1, 8, 4, 1, (2, 2)),\n (\"query_shape\", 1, 1, 8, 4, 4, (4, 4)),\n )\n def testMaskedLocalAttention2D(self, batch, heads, length, depth_k, depth_v,\n query_shape):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, length, depth_k])\n k = tf.random_normal([batch, heads, length, length, depth_k])\n v = tf.random_normal([batch, heads, length, length, depth_v])\n output = common_attention.masked_local_attention_2d(\n q,\n k,\n v,\n query_shape=query_shape,\n memory_flange=(2, 2))\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, length, depth_v))\n\n @parameterized.named_parameters(\n (\"matching_block_length\", 3, 4, 25, 16, 16, 5),\n (\"unmatching_block_length\", 3, 4, 25, 16, 16, 4),\n (\"dynamic_batch\", None, 4, 25, 16, 16, 5),\n (\"different_depth_v\", 3, 4, 25, 16, 17, 5),\n )\n def testLocalUnmaskedAttention1D(self, batch, heads, length,\n depth_k, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_k])\n k = tf.random_normal([batch, heads, length, depth_k])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.local_attention_1d(\n q, k, v, block_length=block_length, filter_width=3)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"matching_block_length\", 3, 4, 25, 16, 16, (4, 4)),\n (\"unmatching_block_length\", 3, 4, 25, 16, 16, (5, 5)),\n (\"dynamic_batch\", None, 4, 25, 16, 16, (4, 4)),\n # TODO(trandustin): Extend function to enable depth_k != depth_v.\n # (\"different_depth_v\", 3, 4, 25, 16, 17, (4, 4)),\n )\n def testLocalUnmaskedAttention2D(self, batch, heads, length,\n depth_k, depth_v, query_shape):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, length, depth_k])\n k = tf.random_normal([batch, heads, length, length, depth_k])\n v = tf.random_normal([batch, heads, length, length, depth_v])\n output = common_attention.local_attention_2d(\n q,\n k,\n v,\n query_shape=query_shape,\n memory_flange=(3, 3))\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, length, depth_v))\n\n @test_utils.run_in_graph_mode_only()\n def 
testMultiheadSelfAttentionMemoryEfficient(self):\n num_heads = 4\n io_size = 16\n batch = 2\n length = 7\n head_size = 5\n x = np.random.rand(batch, length, io_size)\n dy = np.random.rand(batch, length, io_size)\n with self.test_session() as session:\n x = tf.to_float(x)\n dy = tf.to_float(dy)\n bias = common_attention.attention_bias_lower_triangle(length)\n wqkv = tf.get_variable(\n \"wqkv\", [num_heads, 1, io_size, 3 * head_size],\n initializer=tf.random_normal_initializer(stddev=io_size**-0.5))\n wo = tf.get_variable(\n \"wo\", [num_heads, 1, head_size, io_size],\n initializer=tf.random_normal_initializer(\n stddev=(head_size * num_heads)**-0.5))\n norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)\n y = common_attention.multihead_self_attention_memory_efficient(\n x, bias, num_heads, head_size=head_size, forget=False,\n test_vars=(wqkv, wo, norm_scale, norm_bias))\n y_forget = common_attention.multihead_self_attention_memory_efficient(\n x, bias, num_heads, head_size=head_size, forget=True,\n test_vars=(wqkv, wo, norm_scale, norm_bias))\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias = tf.gradients(\n ys=[y], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(\n ys=[y_forget], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy])\n session.run(tf.global_variables_initializer())\n (y, y_forget,\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias,\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f) = session.run(\n [y, y_forget,\n dx, dwqkv, dwo, dnorm_scale, dnorm_bias,\n dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f])\n self.assertAllClose(y, y_forget)\n self.assertAllClose(dwo, dwo_f)\n self.assertAllClose(dwqkv, dwqkv_f)\n self.assertAllClose(dnorm_scale, dnorm_scale_f)\n self.assertAllClose(dnorm_bias, dnorm_bias_f)\n self.assertAllClose(dx, dx_f)\n\n @test_utils.run_in_graph_and_eager_modes()\n def test2dGatherAndScatterInvertibility(self):\n \"\"\"2d gather and scatter invertibility test.\"\"\"\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 8\n query_shape = (2, 3)\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.gather_blocks_2d(x, x_indices)\n x_shape = tf.constant([batch_size, num_heads, height, width, depth])\n scattered_x = common_attention.scatter_blocks_2d(\n gathered_x, x_indices, x_shape)\n res = self.evaluate(scattered_x)\n self.assertAllClose(x, res)\n\n @test_utils.run_in_graph_and_eager_modes()\n def test2dBlockRasterScanMask(self):\n \"\"\"Testing the 2d block raster scan mask.\"\"\"\n query_shape = (2, 3)\n memory_flange = (2, 1)\n mask = common_attention.make_2d_block_raster_mask(\n query_shape, memory_flange)\n res = self.evaluate(mask)\n correct_mask = np.array(\n [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n self.assertAllClose(correct_mask, res)\n\n 
@test_utils.run_in_graph_and_eager_modes()\n def test2dGather(self):\n \"\"\"Testing 2d index gather and block gather functions.\"\"\"\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 8\n query_shape = (2, 3)\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n correct_indices = [[0, 1, 2, 6, 7, 8],\n [3, 4, 5, 9, 10, 11],\n [12, 13, 14, 18, 19, 20],\n [15, 16, 17, 21, 22, 23]]\n correct_gathered_x = [[[y[0, 0, correct_indices[0]],\n y[0, 0, correct_indices[1]],\n y[0, 0, correct_indices[2]],\n y[0, 0, correct_indices[3]]],\n [y[0, 1, correct_indices[0]],\n y[0, 1, correct_indices[1]],\n y[0, 1, correct_indices[2]],\n y[0, 1, correct_indices[3]]]],\n [[y[1, 0, correct_indices[0]],\n y[1, 0, correct_indices[1]],\n y[1, 0, correct_indices[2]],\n y[1, 0, correct_indices[3]]],\n [y[1, 1, correct_indices[0]],\n y[1, 1, correct_indices[1]],\n y[1, 1, correct_indices[2]],\n y[1, 1, correct_indices[3]]]]]\n\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.gather_blocks_2d(x, x_indices)\n x_indices, gathered_x = self.evaluate([x_indices, gathered_x])\n self.assertAllEqual(correct_indices, x_indices)\n self.assertAllClose(correct_gathered_x, gathered_x)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetMemoryRegion(self):\n \"\"\"Testing the function that gathers the flanged memory region.\"\"\"\n np.set_printoptions(threshold=np.inf)\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 3\n query_shape = (2, 3)\n memory_flange = (1, 1)\n\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n zeros = np.zeros((depth), dtype=np.float32)\n five_zeros = np.array([zeros]*5)\n seven_zeros = np.array([zeros]*7)\n two_zeros = np.array([zeros]*2)\n zeros = np.array([zeros])\n\n correct_x_flange = [[[seven_zeros,\n np.concatenate((five_zeros, y[0, 0, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[0, 0, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[0, 0, [8, 9, 10, 11]], zeros,\n y[0, 0, [14, 20]]), axis=0)],\n [seven_zeros,\n np.concatenate((five_zeros, y[0, 1, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[0, 1, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[0, 1, [8, 9, 10, 11]], zeros,\n y[0, 1, [14, 20]]), axis=0)]],\n [[seven_zeros,\n np.concatenate((five_zeros, y[1, 0, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[1, 0, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[1, 0, [8, 9, 10, 11]], zeros,\n y[1, 0, [14, 20]]), axis=0)],\n [seven_zeros,\n np.concatenate((five_zeros, y[1, 1, [2, 8]]),\n axis=0),\n np.concatenate((zeros, y[1, 1, [6, 7, 8, 9]],\n two_zeros), axis=0),\n np.concatenate((y[1, 1, [8, 9, 10, 11]], zeros,\n y[1, 1, [14, 20]]), axis=0)]]]\n correct_x_flange = np.array(correct_x_flange)\n correct_x_center = [[[y[0, 0, [0, 1, 2, 6, 7, 8]],\n y[0, 0, [3, 4, 5, 9, 10, 11]],\n y[0, 0, [12, 13, 14, 18, 19, 20]],\n y[0, 0, [15, 16, 17, 21, 22, 23]]],\n [y[0, 1, [0, 1, 2, 6, 7, 8]],\n y[0, 1, [3, 4, 5, 9, 10, 11]],\n y[0, 1, [12, 13, 14, 18, 19, 20]],\n y[0, 1, [15, 16, 17, 21, 22, 23]]]],\n [[y[1, 0, [0, 1, 2, 6, 7, 8]],\n y[1, 0, [3, 4, 5, 9, 10, 11]],\n y[1, 0, [12, 13, 14, 18, 19, 20]],\n y[1, 0, [15, 16, 17, 21, 22, 23]]],\n [y[1, 1, [0, 1, 2, 6, 7, 8]],\n y[1, 1, [3, 4, 5, 9, 10, 11]],\n y[1, 1, [12, 13, 14, 18, 19, 20]],\n y[1, 1, [15, 16, 17, 21, 22, 23]]]]]\n correct_x_center = np.array(correct_x_center)\n 
x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n x_flange, x_center = common_attention.get_memory_region(\n tf.constant(x, dtype=tf.float32),\n query_shape,\n memory_flange,\n x_indices)\n [x_flange, x_center] = self.evaluate([x_flange, x_center])\n self.assertAllClose(correct_x_flange, x_flange)\n self.assertAllClose(correct_x_center, x_center)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetShiftedCenterBlocks(self):\n \"\"\"Testing the function that gathers the flanged memory region.\"\"\"\n np.set_printoptions(threshold=np.inf)\n batch_size = 2\n num_heads = 2\n height = 4\n width = 6\n depth = 3\n query_shape = (2, 3)\n\n x = np.random.rand(batch_size, num_heads, height, width, depth)\n y = np.reshape(x, (batch_size, num_heads, -1, depth))\n zeros = np.zeros((depth), dtype=np.float32)\n zeros = np.array([zeros])\n\n correct_gathered_x = [[[np.concatenate((zeros, y[0, 0, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[0, 0, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 0, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 0, [15, 16, 17, 21, 22]]),\n axis=0)],\n [np.concatenate((zeros, y[0, 1, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[0, 1, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 1, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[0, 1, [15, 16, 17, 21, 22]]),\n axis=0)]],\n [[np.concatenate((zeros, y[1, 0, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[1, 0, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 0, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 0, [15, 16, 17, 21, 22]]),\n axis=0)],\n [np.concatenate((zeros, y[1, 1, [0, 1, 2, 6, 7]]),\n axis=0),\n np.concatenate((zeros, y[1, 1, [3, 4, 5, 9, 10]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 1, [12, 13, 14, 18, 19]]),\n axis=0),\n np.concatenate((zeros,\n y[1, 1, [15, 16, 17, 21, 22]]),\n axis=0)]]]\n correct_gathered_x = np.array(correct_gathered_x)\n x_indices = common_attention.gather_indices_2d(\n x, query_shape, query_shape)\n gathered_x = common_attention.get_shifted_center_blocks(\n tf.constant(x, dtype=tf.float32),\n x_indices)\n x_indices, gathered_x = self.evaluate([x_indices, gathered_x])\n self.assertAllClose(correct_gathered_x, gathered_x)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductAttentionRelative(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_attention_relative(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=3)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2SharedRel(self):\n # (batch, heads, length, depth)\n x = 
np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=True)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2MaxRelativeLargerThanLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 3, 7)\n y = np.random.rand(5, 4, 3, 7)\n max_relative_position = 16\n a = common_attention.dot_product_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 3, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelativeV2(self):\n x = np.random.rand(5, 7, 12, 32)\n y = np.random.rand(5, 7, 12, 32)\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n 35)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 12, 32))\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testExtractblocks(self):\n\n batch_size = 1\n num_heads = 3\n height = 6\n width = 10\n depth = 15\n block_h = 3\n block_w = 2\n t = np.random.rand(batch_size * num_heads, height, width, depth)\n a = common_attention._extract_blocks(t, block_h, block_w)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size * num_heads, height//block_h,\n width//block_w, block_h, block_w, depth))\n # also check if the content is right\n out = np.zeros((batch_size*num_heads, height//block_h,\n width//block_w, block_h, block_w, depth))\n for b in range(batch_size*num_heads):\n for x in range(height//block_h):\n for y in range(width//block_w):\n for v in range(block_h):\n for w in range(block_w):\n out[b, x, y, v, w] = t[b, block_h*x+v, block_w*y+w]\n self.assertAllClose(res, out)\n\n def python_get_2d_local_memory(self, t, batch_size, num_heads, height, width,\n num_h_blocks, num_w_blocks, query_shape,\n memory_flange, depth):\n # also check if the content is right\n out = np.zeros((batch_size, num_heads, height//query_shape[0],\n width//query_shape[1], query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n memory_height = query_shape[0]+2*memory_flange[0]\n memory_width = query_shape[1]+2*memory_flange[1]\n t_padded = np.pad(t, ((0, 0), (0, 0), (memory_flange[0], memory_flange[0]),\n (memory_flange[1], memory_flange[1]), (0, 0)),\n \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(num_h_blocks):\n for y in range(num_w_blocks):\n for v in range(memory_height):\n for w in range(memory_width):\n memory_h_start = x*query_shape[0]\n memory_w_start = y*query_shape[1]\n memory_h_index = memory_h_start + v\n memory_w_index = memory_w_start + w\n out[b, h, x, y, v, w] = t_padded[b, h, memory_h_index,\n 
memory_w_index]\n return out\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testGet2dLocalMemory(self):\n batch_size = 3\n num_heads = 3\n height = 6\n width = 6\n depth = 15\n num_h_blocks = 3\n num_w_blocks = 3\n memory_flange = [1, 1]\n query_shape = [2, 2]\n t = np.random.rand(batch_size, num_heads, height, width, depth)\n a = common_attention.get_2d_local_memory_v2(\n np.reshape(t, (batch_size*num_heads, height, width, depth)),\n query_shape, memory_flange)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size*num_heads,\n num_h_blocks,\n num_w_blocks,\n query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n out = self.python_get_2d_local_memory(t, batch_size, num_heads,\n height, width, num_h_blocks,\n num_w_blocks, query_shape,\n memory_flange, depth)\n out = np.reshape(out, (batch_size*num_heads,\n num_h_blocks,\n num_w_blocks,\n query_shape[0]+2*memory_flange[0],\n query_shape[1]+2*memory_flange[1], depth))\n\n self.assertAllClose(res, out)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testSplitAlongWidth(self):\n batch_size = 1\n num_heads = 3\n num_outer_h_blocks = 4\n num_outer_w_blocks = 8\n memory_flange = [2, 2]\n num_w_blocks = 3\n depth = 15\n t = np.random.rand(batch_size*num_heads, num_outer_h_blocks,\n num_outer_w_blocks, memory_flange[0], memory_flange[1],\n depth)\n a = common_attention._split_along_width(t)\n # self.evaluate(tf.global_variables_initializer())\n res_l, res_r = self.evaluate(a)\n # res = self.evaluate(a)\n self.assertEqual(res_l.shape, (batch_size*num_heads, num_outer_h_blocks,\n num_w_blocks, memory_flange[0],\n memory_flange[1], depth))\n self.assertEqual(res_r.shape, (batch_size*num_heads, num_outer_h_blocks,\n num_w_blocks, memory_flange[0],\n memory_flange[1], depth))\n # also check if the content is right\n out_l = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks,\n memory_flange[0], memory_flange[1], depth))\n out_r = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks,\n memory_flange[0], memory_flange[1], depth))\n block_h = memory_flange[0]\n block_w = memory_flange[1]\n for b in range(batch_size*num_heads):\n for x in range(num_outer_h_blocks):\n for y in range(num_w_blocks):\n for v in range(block_h):\n for w in range(block_w):\n # we should compute the index of the position in the\n out_l[b, x, y, v, w] = (\n t[b, x, 2*y, v, w]\n )\n out_r[b, x, y, v, w] = (\n t[b, x, 2*y+3, v, w]\n )\n self.assertAllClose(res_l, out_l)\n self.assertAllClose(res_r, out_r)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testGetLeftRightBlocks(self):\n batch_size = 1\n num_heads = 3\n num_outer_h_blocks = 6\n num_outer_w_blocks = 6\n memory_flange = [2, 2]\n num_h_blocks = 2\n num_w_blocks = 2\n depth = 15\n t = np.random.rand(batch_size*num_heads, num_outer_h_blocks,\n num_outer_w_blocks, memory_flange[0], memory_flange[1],\n depth)\n a = common_attention._get_left_right_blocks(t)\n self.evaluate(tf.global_variables_initializer())\n res_l, res_r = self.evaluate(a)\n self.assertEqual(res_l.shape, (batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2,\n memory_flange[1], depth))\n self.assertEqual(res_r.shape, (batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2,\n memory_flange[1], depth))\n # also check if the content is right\n block_h = memory_flange[0]*2\n block_w = memory_flange[1]\n out_l = np.zeros((batch_size*num_heads, num_h_blocks,\n 
num_w_blocks, memory_flange[0]*2, memory_flange[1],\n depth))\n out_r = np.zeros((batch_size*num_heads, num_h_blocks,\n num_w_blocks, memory_flange[0]*2, memory_flange[1],\n depth))\n block_h = memory_flange[0]*2\n block_w = memory_flange[1]\n for b in range(batch_size*num_heads):\n for x in range(num_h_blocks):\n for y in range(num_w_blocks):\n for v in range(block_h):\n for w in range(block_w):\n # we should compute the index of the position in the\n outer_block_h_index = (\n 1 + block_h//memory_flange[0]*x + v//2)\n h_index = v%memory_flange[0]\n left_outer_w_index = 2*y\n right_outer_w_index = 2*y + 3\n out_l[b, x, y, v, w] = (\n t[b, outer_block_h_index, left_outer_w_index, h_index,\n w]\n )\n out_r[b, x, y, v, w] = (\n t[b, outer_block_h_index, right_outer_w_index, h_index,\n w]\n )\n self.assertAllClose(res_l, out_l)\n self.assertAllClose(res_r, out_r)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testDotProductUnmaskedAttentionLocal2dTpu(self):\n batch_size = 1\n num_heads = 3\n height = 7\n width = 12\n depth = 15\n num_h_blocks = 4\n num_w_blocks = 6\n memory_flange = [1, 1]\n query_shape = [2, 2]\n memory_h = query_shape[0] + 2*memory_flange[0]\n memory_w = query_shape[1] + 2*memory_flange[1]\n\n q = np.random.rand(batch_size, num_heads, height, width, depth)\n k = np.random.rand(batch_size, num_heads, height, width, depth)\n v = np.random.rand(batch_size, num_heads, height, width, depth)\n a = common_attention.dot_product_unmasked_attention_local_2d_tpu(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32), None, max_relative_position=None,\n query_shape=query_shape, dropout_rate=0.0, image_shapes=None,\n name=None, make_image_summary=False, dropout_broadcast_dims=None)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size, num_heads,\n height, width, depth))\n # now to check the content too\n # first pad q, k, ad v\n height_padding = -height % query_shape[0]\n width_padding = -width % query_shape[1]\n new_height = height + -height % query_shape[0]\n new_width = width + -width % query_shape[1]\n q = np.pad(q, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n k = np.pad(k, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n v = np.pad(v, ((0, 0), (0, 0), (0, height_padding),\n (0, width_padding), (0, 0)), \"constant\",\n constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0)))\n queries = self.python_get_2d_local_memory(q, batch_size, num_heads,\n new_height, new_width,\n num_h_blocks, num_w_blocks,\n query_shape, [0, 0],\n depth)\n keys = self.python_get_2d_local_memory(k, batch_size, num_heads,\n new_height, new_width, num_h_blocks,\n num_w_blocks, query_shape,\n memory_flange, depth)\n values = self.python_get_2d_local_memory(v, batch_size, num_heads,\n new_height, new_width,\n num_h_blocks, num_w_blocks,\n query_shape,\n memory_flange, depth)\n logits = np.matmul(\n np.reshape(queries, (batch_size, num_heads,\n num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n np.transpose(\n np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n memory_h*memory_w, depth)), (0, 1, 2, 3, 5, 4)))\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n att_output = np.matmul(att, 
np.reshape(\n values, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n memory_h*memory_w, depth)))\n att_output = np.reshape(att_output,\n (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0], query_shape[1], depth))\n # putting the attention results back into the right place\n out = np.zeros((batch_size, num_heads, new_height, new_width, depth))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(new_height):\n for y in range(new_width):\n h_block_index = x//query_shape[0]\n w_block_index = y//query_shape[1]\n inside_h_index = x%query_shape[0]\n inside_w_index = y%query_shape[1]\n out[b, h, x, y] = (\n att_output[b, h, h_block_index, w_block_index, inside_h_index,\n inside_w_index])\n out = out[:, :, :height, :width, :]\n self.assertAllClose(res, out)\n\n @tf.contrib.eager.run_test_in_graph_and_eager_modes()\n def testDotProductUnmaskedAttentionLocal2dTpuSimple(self):\n batch_size = 1\n num_heads = 3\n height = 8\n width = 12\n total_depth = 15\n num_h_blocks = 4\n num_w_blocks = 6\n depth = 5\n query_shape = [2, 2]\n\n x = np.random.rand(batch_size, height, width, total_depth)\n a = (\n common_attention.dot_product_unmasked_attention_local_2d_tpu_simple(\n tf.constant(x, dtype=tf.float32),\n None, total_depth, total_depth, num_heads,\n query_shape=query_shape))\n self.evaluate(tf.global_variables_initializer())\n res, q, k, v = self.evaluate(a)\n self.assertEqual(res.shape, (batch_size, height, width, total_depth))\n # reshape q, k, v from batch, heads, height*width to batch, heads,\n # num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], depth\n resh_shape = (batch_size, num_h_blocks, num_w_blocks,\n num_heads, query_shape[0], query_shape[1],\n depth)\n resh = lambda l: np.reshape(l, resh_shape)\n q, k, v = map(resh, [q, k, v])\n trans = lambda l: np.transpose(l, (0, 3, 1, 2, 4, 5, 6))\n q, k, v = map(trans, [q, k, v])\n new_height = height + -height % query_shape[0]\n new_width = width + -width % query_shape[1]\n (queries, keys, values) = (q, k, v)\n logits = np.matmul(\n np.reshape(queries, (batch_size, num_heads,\n num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n np.transpose(\n np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)),\n (0, 1, 2, 3, 5, 4)))\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n att_output = np.matmul(att, np.reshape(\n values, (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0]*query_shape[1], depth)))\n att_output = np.reshape(att_output,\n (batch_size, num_heads, num_h_blocks, num_w_blocks,\n query_shape[0], query_shape[1], depth))\n # putting the attention results back into the right place\n out = np.zeros((batch_size, num_heads, new_height, new_width, depth))\n for b in range(batch_size):\n for h in range(num_heads):\n for x in range(new_height):\n for y in range(new_width):\n h_block_index = x//query_shape[0]\n w_block_index = y//query_shape[1]\n inside_h_index = x%query_shape[0]\n inside_w_index = y%query_shape[1]\n out[b, h, x, y] = (\n att_output[b, h, h_block_index, w_block_index, inside_h_index,\n inside_w_index])\n out = np.transpose(out, (0, 2, 3, 1, 4))\n out = np.reshape(out, (batch_size, new_height, new_width, total_depth))\n out = out[:, :height, :width, :]\n\n self.assertAllClose(res, out)\n\n def python_relative_att(self, q, k, v, batch, num_heads, height, width,\n depth, height_key_relative_embeddings,\n 
width_key_relative_embeddings,\n heads_share_relative_embedding):\n \"\"\"Relative attention computation in numpy.\n\n For query index (i,j) and key index (l, m) the logit is\n q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of\n relative embeddings in height and width spatial dimensions, respectively.\n\n Args:\n q: [batch, heads, height, width, depth] tensor\n k: [batch, heads, height, width, depth] tensor\n v: [batch, heads, height, width, depth] tensor\n batch: int scalar\n num_heads: int scalar\n height: int scalar\n width: int scalar\n depth: int scalar\n height_key_relative_embeddings: a tensor of relative embeddings\n width_key_relative_embeddings: a tensor of relative embeddings\n heads_share_relative_embedding: a boolean\n\n Returns:\n att_output: A tensor\n \"\"\"\n\n logits = np.zeros((batch, num_heads, height*width, height*width))\n for b in range(batch):\n for h in range(num_heads):\n for i in range(height*width):\n q_col = i%width\n q_row = int((i-q_col)/width)\n for j in range(height*width):\n k_col = j%width\n k_row = int((j-k_col)/width)\n logit = np.dot(q[b][h][q_row][q_col], k[b][h][k_row][k_col])\n width_rel_dist = k_col - q_col\n width_rel_index = width-1 + width_rel_dist\n if heads_share_relative_embedding:\n width_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n width_key_relative_embeddings[width_rel_index]))\n else:\n width_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n width_key_relative_embeddings[h][width_rel_index]))\n height_rel_dist = k_row - q_row\n height_rel_index = height-1 + height_rel_dist\n if heads_share_relative_embedding:\n height_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n height_key_relative_embeddings[height_rel_index]))\n else:\n height_rel_logit = (\n np.dot(q[b][h][q_row][q_col],\n height_key_relative_embeddings[h][height_rel_index]))\n logits[b, h, i, j] = logit + width_rel_logit + height_rel_logit\n # now to do a softmax across the logits\n att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n # comparing the outputs\n att_output = np.matmul(att,\n np.reshape(v, (\n batch, num_heads, height*width, depth)))\n att_output = np.reshape(att_output,\n (batch, num_heads, height, width, depth))\n return att_output\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelative2d(self):\n batch = 1\n height = 3\n width = 3\n num_heads = 2\n max_relative_position = 6\n depth = 5\n heads_share_relative_embedding = False\n q = np.random.rand(batch, num_heads, height, width, depth)\n k = np.random.rand(batch, num_heads, height, width, depth)\n v = np.random.rand(batch, num_heads, height, width, depth)\n a = common_attention.dot_product_unmasked_self_attention_relative_2d(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=heads_share_relative_embedding)\n\n self.evaluate(tf.global_variables_initializer())\n res, height_key_relative_embeddings, width_key_relative_embeddings = (\n self.evaluate(a))\n att_output = self.python_relative_att(\n q, k, v, batch, num_heads, height, width, depth,\n height_key_relative_embeddings, width_key_relative_embeddings,\n heads_share_relative_embedding)\n self.assertEqual(res.shape, (batch, num_heads, height, width, depth))\n self.assertAllClose(res, att_output)\n\n @parameterized.parameters(\n (1, 10, 12, 2, 6, 3),\n (1, 1, 12, 2, 6, 3),\n (2, 10, 1, 2, 6, 3),\n (1, 10, 12, 2, 1, 1),\n (1, 10, 12, 2, 2, 
8),\n (4, 10, 12, 2, 12, 10),\n )\n @test_utils.run_in_graph_and_eager_modes()\n def testDotProductUnMaskedAttentionRelative2dSharedOneRow(\n self, batch, height, width, num_heads, max_relative_position, depth):\n heads_share_relative_embedding = True\n q = np.random.rand(batch, num_heads, height, width, depth)\n k = np.random.rand(batch, num_heads, height, width, depth)\n v = np.random.rand(batch, num_heads, height, width, depth)\n\n a = common_attention.dot_product_unmasked_self_attention_relative_2d(\n tf.constant(q, dtype=tf.float32),\n tf.constant(k, dtype=tf.float32),\n tf.constant(v, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=heads_share_relative_embedding)\n\n self.evaluate(tf.global_variables_initializer())\n (res, height_key_relative_embeddings,\n width_key_relative_embeddings) = self.evaluate(a)\n att_output = self.python_relative_att(\n q, k, v, batch, num_heads, height, width, depth,\n height_key_relative_embeddings, width_key_relative_embeddings,\n heads_share_relative_embedding)\n self.assertEqual(res.shape,\n (batch, num_heads, height, width, depth))\n self.assertAllClose(res, att_output)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2Unmasked(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2UnmaskedSharedRel(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n max_relative_position = 3\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=True)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testRelativeAttentionV2UnmaskedRelativeLargerThanLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 3, 7)\n y = np.random.rand(5, 4, 3, 7)\n max_relative_position = 16\n a = common_attention.dot_product_unmasked_self_attention_relative_v2(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n None,\n max_relative_position=max_relative_position,\n heads_share_relative_embedding=False)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 3, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n add_relative_to_values=False,\n 
name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2AddRelativeValues(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 4, 16, 7)\n y = np.random.rand(5, 4, 16, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n add_relative_to_values=False,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 4, 16, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2SeqShorterThanBlockLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 7, 2, 7)\n y = np.random.rand(5, 7, 2, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 2, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testMaskedRelativeLocalAttentionV2SeqShorterThanTwiceBlockLength(self):\n # (batch, heads, length, depth)\n x = np.random.rand(5, 7, 5, 7)\n y = np.random.rand(5, 7, 5, 7)\n block_length = 3\n a = common_attention.masked_relative_local_attention_1d(\n tf.constant(x, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n tf.constant(y, dtype=tf.float32),\n block_length=block_length,\n heads_share_relative_embedding=True,\n name=\"masked_relative_local_attention_1d\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(a)\n self.assertEqual(res.shape, (5, 7, 5, 7))\n\n def testBiasBatchCoordinates(self):\n \"\"\"Testing the batch coordinates mask.\"\"\"\n q = tf.constant([0, 0, 1, 1, 1, 1, 2, 2, 2], dtype=tf.int32)\n q = tf.expand_dims(q, axis=-1)\n\n k = tf.constant([0, 0, 0, 2, 2, 3, 3, 3], dtype=tf.int32)\n k = tf.expand_dims(k, axis=-1)\n\n ground_truth = np.array([\n [0, 0, 0, 1, 1, 1, 1, 1], # 0\n [0, 0, 0, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1], # 1 (just masked)\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1], # 2\n [1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n ], np.float32) * -1e9\n\n bias = common_attention.attention_bias_coordinates(q, k)\n self.assertAllClose(self.evaluate(bias), ground_truth)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBiasFuture(self):\n \"\"\"Testing the sequence order mask.\"\"\"\n q = tf.constant([0, 1, 2, 3, 0, 1, 2, 0, 1], dtype=tf.int32)\n q = tf.expand_dims(q, axis=-1)\n\n k = tf.constant([0, 1, 2, 3, 4, 0, 1, 2], dtype=tf.int32)\n k = tf.expand_dims(k, axis=-1)\n\n ground_truth = np.array([\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n [0, 0, 0, 1, 1, 0, 0, 0], # 2\n [0, 0, 0, 0, 1, 0, 0, 0], # 3\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n [0, 0, 0, 1, 1, 0, 0, 0], # 2\n [0, 1, 1, 1, 1, 0, 1, 1], # 0\n [0, 0, 1, 1, 1, 0, 0, 1], # 1\n ], np.float32) * -1e9\n\n bias = 
common_attention.attention_bias_future(q, k)\n self.assertAllClose(self.evaluate(bias), ground_truth)\n\n @test_utils.run_in_graph_mode_only()\n def testMultiheadAttentionWithLayerCollection(self):\n \"\"\"Testing multihead attention with layer collection for kfac.\"\"\"\n x = tf.zeros([3, 4, 5], tf.float32)\n layer_collection = kfac.LayerCollection()\n common_attention.multihead_attention(\n x, None, None, 10, 10, 10, 2, 0.2,\n layer_collection=layer_collection)\n self.assertLen(layer_collection.get_blocks(), 4)\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 3),\n (\"dynamic_batch\", None, 1, 8, 4, 2),\n (\"batches\", 4, 3, 8, 4, 2),\n (\"block_length\", 1, 1, 8, 4, 4),\n )\n def testDilatedAttention(self, batch, heads, length, depth_v, block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_v])\n k = tf.random_normal([batch, heads, length, depth_v])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.dilated_self_attention_1d(\n q, k, v,\n query_block_size=block_length,\n memory_block_size=block_length,\n gap_size=2,\n num_memory_blocks=2)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\n @parameterized.named_parameters(\n (\"\", 1, 1, 8, 4, 3),\n (\"dynamic_batch\", None, 1, 8, 4, 2),\n (\"batches\", 4, 3, 8, 4, 2),\n (\"block_length\", 1, 1, 8, 4, 4),\n )\n def testMaskedDilatedAttention(self, batch, heads, length, depth_v,\n block_length):\n if batch is None:\n batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32)\n q = tf.random_normal([batch, heads, length, depth_v])\n k = tf.random_normal([batch, heads, length, depth_v])\n v = tf.random_normal([batch, heads, length, depth_v])\n output = common_attention.masked_dilated_self_attention_1d(\n q, k, v,\n query_block_size=block_length,\n memory_block_size=block_length,\n gap_size=2,\n num_memory_blocks=2)\n if isinstance(batch, tf.Tensor):\n batch, res = self.evaluate([batch, output])\n else:\n res = self.evaluate(output)\n\n self.assertEqual(res.shape, (batch, heads, length, depth_v))\n\nif __name__ == \"__main__\":\n tf.test.main()\n\n"
] |
[
[
"numpy.dot",
"tensorflow.contrib.eager.run_test_in_graph_and_eager_modes",
"tensorflow.zeros",
"tensorflow.compat.v1.enable_eager_execution",
"numpy.concatenate",
"numpy.exp",
"numpy.pad",
"numpy.reshape",
"tensorflow.gradients",
"tensorflow.test.main",
"tensorflow.to_float",
"tensorflow.random_normal_initializer",
"numpy.zeros",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"numpy.transpose",
"numpy.array",
"tensorflow.constant",
"numpy.set_printoptions",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.random_normal"
]
] |
jaggernaut007/margipose-1
|
[
"ae0580cb7b3b41c21965cd32e280d2af0e8cf2c3"
] |
[
"margipose/bin/hyperparam_search.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"Search for good training hyperparameters.\n\nThis code runs the LR range test proposed in \"Cyclical Learning Rates for Training Neural Networks\"\nby Leslie N. Smith.\n\"\"\"\n\nimport json\nfrom os import path, environ\n\nimport numpy as np\nimport plotly.graph_objs as go\nimport sacred\nimport tele\nimport torch\nfrom sacred.host_info import get_host_info\nfrom sacred.run import Run\nfrom tele.meter import ValueMeter\nfrom torch.optim import SGD\nfrom tqdm import tqdm\n\nfrom margipose.cli import Subcommand\nfrom margipose.dsntnn import average_loss\nfrom margipose.models import create_model\nfrom margipose.models.margipose_model import Default_MargiPose_Desc\nfrom margipose.models.chatterbox_model import Default_Chatterbox_Desc\nfrom margipose.train_helpers import create_train_dataloader, create_showoff_notebook\nfrom margipose.utils import seed_all, init_algorithms\n\nsacred.SETTINGS['DISCOVER_SOURCES'] = 'dir'\nex = sacred.Experiment(base_dir=path.realpath(path.join(__file__, '..', '..')))\n\nglobal_opts = {}\n\n\ndef forward_loss(model, out_var, target_var, mask_var, valid_depth):\n target_var = target_var.narrow(-1, 0, 3)\n\n if not 0 in valid_depth:\n losses = model.forward_3d_losses(out_var, target_var)\n elif not 1 in valid_depth:\n losses = model.forward_2d_losses(out_var, target_var)\n else:\n losses_3d = model.forward_3d_losses(out_var, target_var)\n losses_2d = model.forward_2d_losses(out_var, target_var)\n losses = torch.stack([\n (losses_3d[i] if use_3d == 1 else losses_2d[i])\n for i, use_3d in enumerate(valid_depth)\n ])\n\n return average_loss(losses, mask_var)\n\n\nex.add_named_config('margipose_model', model_desc=Default_MargiPose_Desc)\nex.add_named_config('chatterbox_model', model_desc=Default_Chatterbox_Desc)\n\nex.add_config(\n showoff=not not environ.get('SHOWOFF_URL'),\n batch_size=32,\n deterministic=False,\n train_datasets=['mpi3d-train', 'mpii-train'],\n lr_min=1e-1,\n lr_max=1e2,\n max_iters=1000,\n ema_beta=0.99, # Beta for exponential moving average of loss\n weight_decay=0,\n momentum=0.9,\n)\n\n\[email protected]\ndef sacred_main(_run: Run, seed, showoff, batch_size, model_desc, deterministic, train_datasets,\n lr_min, lr_max, max_iters, ema_beta, weight_decay, momentum):\n seed_all(seed)\n init_algorithms(deterministic=deterministic)\n\n model = create_model(model_desc).to(global_opts['device'])\n data_loader = create_train_dataloader(train_datasets, model.data_specs, batch_size,\n examples_per_epoch=(max_iters * batch_size))\n data_iter = iter(data_loader)\n\n print(json.dumps(model_desc, sort_keys=True, indent=2))\n\n def do_training_iteration(optimiser):\n batch = next(data_iter)\n\n in_var = batch['input'].to(global_opts['device'], torch.float32)\n target_var = batch['target'].to(global_opts['device'], torch.float32)\n mask_var = batch['joint_mask'].to(global_opts['device'], torch.float32)\n\n # Calculate predictions and loss\n out_var = model(in_var)\n loss = forward_loss(model, out_var, target_var, mask_var, batch['valid_depth'])\n\n # Calculate gradients\n optimiser.zero_grad()\n loss.backward()\n\n # Update parameters\n optimiser.step()\n\n return loss.item()\n\n optimiser = SGD(model.parameters(), lr=1, weight_decay=weight_decay, momentum=momentum)\n\n tel = tele.Telemetry({\n 'config': ValueMeter(skip_reset=True),\n 'host_info': ValueMeter(skip_reset=True),\n 'loss_lr_fig': ValueMeter(),\n })\n\n notebook = None\n if showoff:\n title = 'Hyperparameter search ({}@{})'.format(model_desc['type'], 
model_desc['version'])\n notebook = create_showoff_notebook(title, ['lrfinder'])\n\n from tele.showoff import views\n\n tel.sink(tele.showoff.Conf(notebook), [\n views.Inspect(['config'], 'Experiment configuration', flatten=True),\n views.Inspect(['host_info'], 'Host information', flatten=True),\n views.FrameContent(['loss_lr_fig'], 'Loss vs learning rate graph', 'plotly'),\n ])\n\n def set_progress(value):\n if notebook is not None:\n notebook.set_progress(value)\n\n tel['config'].set_value(_run.config)\n tel['host_info'].set_value(get_host_info())\n\n lrs = np.geomspace(lr_min, lr_max, max_iters)\n losses = []\n avg_loss = 0\n min_loss = np.inf\n for i, lr in enumerate(tqdm(lrs, ascii=True)):\n set_progress(i / len(lrs))\n\n for param_group in optimiser.param_groups:\n param_group['lr'] = lr\n loss = do_training_iteration(optimiser)\n avg_loss = ema_beta * avg_loss + (1 - ema_beta) * loss\n smoothed_loss = avg_loss / (1 - ema_beta ** (i + 1))\n if min_loss > 0 and smoothed_loss > 4 * min_loss:\n break\n min_loss = min(smoothed_loss, min_loss)\n losses.append(smoothed_loss)\n\n if i % 10 == 0:\n fig = go.Figure(\n data=[go.Scatter(x=lrs[:len(losses)].tolist(), y=losses, mode='lines')],\n layout=go.Layout(\n margin=go.Margin(l=60, r=40, b=80, t=20, pad=4),\n xaxis=go.XAxis(title='Learning rate', type='log', exponentformat='power'),\n yaxis=go.YAxis(title='Training loss'),\n )\n )\n tel['loss_lr_fig'].set_value(fig)\n tel.step()\n\n set_progress(1)\n\n\ndef main(argv, common_opts):\n global_opts.update(common_opts)\n ex.run_commandline(argv)\n\n\nHyperparams_Subcommand = Subcommand(name='hyperparams', func=main,\n help='search for good training hyperparameters')\n\nif __name__ == '__main__':\n Hyperparams_Subcommand.run()\n"
] |
[
[
"numpy.geomspace"
]
] |
mwoedlinger/compression
|
[
"387742e6efaa85ddf85510c461a14e1a33125404"
] |
[
"test.py"
] |
[
"import os\nimport argparse\nfrom model import *\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport json\nimport time\nfrom datasets import Datasets, TestKodakDataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom Meter import AverageMeter\ntorch.backends.cudnn.enabled = True\n# gpu_num = 4\ngpu_num = 1 # torch.cuda.device_count()\ncur_lr = base_lr = 1e-4 # * gpu_num\ntrain_lambda = 8192\nprint_freq = 100\ncal_step = 40\nwarmup_step = 0 # // gpu_num\nbatch_size = 4\ntot_epoch = 1000000\ntot_step = 2500000\ndecay_interval = 2200000\nlr_decay = 0.1\nimage_size = 256\nlogger = logging.getLogger(\"ImageCompression\")\ntb_logger = None\nglobal_step = 0\nsave_model_freq = 50000\ntest_step = 10000\nout_channel_N = 192\nout_channel_M = 320\nparser = argparse.ArgumentParser(\n description='Pytorch reimplement for variational image compression with a scale hyperprior')\n\nparser.add_argument('-n', '--name', default='',\n help='output training details')\nparser.add_argument('-p', '--pretrain', default='',\n help='load pretrain model')\nparser.add_argument('-t', '--test', default='',\n help='test dataset')\nparser.add_argument('--config', dest='config', required=False,\n help='hyperparameter in json format')\nparser.add_argument('--seed', default=234, type=int,\n help='seed for random functions, and network initialization')\nparser.add_argument('--val', dest='val_path', required=True,\n help='the path of validation dataset')\n\n\ndef parse_config(config):\n config = json.load(open(args.config))\n global tot_epoch, tot_step, base_lr, cur_lr, lr_decay, decay_interval, train_lambda, batch_size, print_freq, \\\n out_channel_M, out_channel_N, save_model_freq, test_step\n if 'tot_epoch' in config:\n tot_epoch = config['tot_epoch']\n if 'tot_step' in config:\n tot_step = config['tot_step']\n if 'train_lambda' in config:\n train_lambda = config['train_lambda']\n if train_lambda < 4096:\n out_channel_N = 128\n out_channel_M = 192\n else:\n out_channel_N = 192\n out_channel_M = 320\n if 'batch_size' in config:\n batch_size = config['batch_size']\n if \"print_freq\" in config:\n print_freq = config['print_freq']\n if \"test_step\" in config:\n test_step = config['test_step']\n if \"save_model_freq\" in config:\n save_model_freq = config['save_model_freq']\n if 'lr' in config:\n if 'base' in config['lr']:\n base_lr = config['lr']['base']\n cur_lr = base_lr\n if 'decay' in config['lr']:\n lr_decay = config['lr']['decay']\n if 'decay_interval' in config['lr']:\n decay_interval = config['lr']['decay_interval']\n if 'out_channel_N' in config:\n out_channel_N = config['out_channel_N']\n if 'out_channel_M' in config:\n out_channel_M = config['out_channel_M']\n\n\ndef test(step):\n with torch.no_grad():\n net.eval()\n sumBpp = 0\n sumPsnr = 0\n sumMsssim = 0\n sumMsssimDB = 0\n cnt = 0\n for batch_idx, input in enumerate(test_loader):\n clipped_recon_image, mse_loss, bpp_feature, bpp_z, bpp = net(input)\n mse_loss, bpp_feature, bpp_z, bpp = \\\n torch.mean(mse_loss), torch.mean(\n bpp_feature), torch.mean(bpp_z), torch.mean(bpp)\n psnr = 10 * (torch.log(1. 
/ mse_loss) / np.log(10))\n sumBpp += bpp\n sumPsnr += psnr\n msssim = ms_ssim(clipped_recon_image.cpu().detach(),\n input, data_range=1.0, size_average=True)\n msssimDB = -10 * (torch.log(1-msssim) / np.log(10))\n sumMsssimDB += msssimDB\n sumMsssim += msssim\n cnt += 1\n logger.info(\"Num: {}, Bpp:{:.6f}, PSNR:{:.6f}, MS-SSIM:{:.6f}, MS-SSIM-DB:{:.6f}\".format(\n cnt, bpp, psnr, msssim, msssimDB))\n\n logger.info(\"Test on Kodak dataset: model-{}\".format(step))\n sumBpp /= cnt\n sumPsnr /= cnt\n sumMsssim /= cnt\n sumMsssimDB /= cnt\n logger.info(\"Dataset Average result---Dataset Num: {}, Bpp:{:.6f}, PSNR:{:.6f}, MS-SSIM:{:.6f}, MS-SSIM-DB:{:.6f}\".format(\n cnt, sumBpp, sumPsnr, sumMsssim, sumMsssimDB))\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n torch.manual_seed(seed=args.seed)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s] %(message)s')\n formatter = logging.Formatter(\n '[%(asctime)s][%(filename)s][L%(lineno)d][%(levelname)s] %(message)s')\n stdhandler = logging.StreamHandler()\n stdhandler.setLevel(logging.INFO)\n stdhandler.setFormatter(formatter)\n logger.addHandler(stdhandler)\n tb_logger = None\n logger.setLevel(logging.INFO)\n logger.info(\"image compression test\")\n logger.info(\"config : \")\n logger.info(open(args.config).read())\n parse_config(args.config)\n logger.info(\"out_channel_N:{}, out_channel_M:{}\".format(\n out_channel_N, out_channel_M))\n model = ImageCompressor(out_channel_N, out_channel_M)\n if args.pretrain != '':\n logger.info(\"loading model:{}\".format(args.pretrain))\n global_step = load_model(model, args.pretrain)\n net = model.to(torch.device(1))\n #net = torch.nn.DataParallel(net, list(range(gpu_num)))\n global test_loader\n if args.test == 'kodak':\n test_dataset = TestKodakDataset(data_dir=args.val_path)\n logger.info(\"No test dataset\")\n exit(-1)\n test_loader = DataLoader(dataset=test_dataset,\n shuffle=False, batch_size=1, pin_memory=True)\n test(global_step)\n"
] |
[
[
"torch.mean",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.log",
"torch.device"
]
] |
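A minimal, self-contained sketch of the evaluation pattern in the script above (a torch.no_grad loop over a DataLoader, with torch.mean and torch.log used to turn MSE into PSNR). nn.Identity stands in for the ImageCompressor model and random tensors replace the Kodak images, so every name and number below is a placeholder rather than part of the dataset row.

import math
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

torch.manual_seed(234)                                   # reproducible fake inputs
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = nn.Identity().to(device)                         # placeholder for ImageCompressor
images = TensorDataset(torch.rand(8, 3, 64, 64))         # stand-in for TestKodakDataset
loader = DataLoader(images, batch_size=1, shuffle=False)

with torch.no_grad():                                    # evaluation only, no gradients
    for (img,) in loader:
        img = img.to(device)
        recon = model(img)
        mse = torch.mean((recon - img) ** 2).clamp(min=1e-12)
        psnr = 10 * torch.log(1.0 / mse) / math.log(10)  # 10 * log10(1 / MSE)
        print(f"PSNR: {psnr.item():.2f} dB")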
DVSimon/XGB-SF-Crime-Categorization
|
[
"5eb2a0a8f01abcf1a117116f7ee834209f76ca85"
] |
[
"XGB Source Code/Central.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\n\n#libraries\nimport pandas as pd\nimport numpy as np\nfrom numpy import column_stack\nfrom xgboost import XGBClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom scipy import stats\nfrom time import time\nimport matplotlib.pyplot as plt\nfrom xgboost import plot_importance\nimport re\nfrom scipy import sparse\nfrom datetime import datetime\nimport pickle\nimport seaborn as sns\n\n\n# In[2]:\n\n\nn_threads = 36\n#set random_seed for reproduction purposes..\nrandom_seed = 10\n\n\n# In[3]:\n\n\n#convert date to seperate values for day, month, hour\ndef convert_date_to_day(dt):\n\n result = re.findall(r'\\d{4}-(\\d{2})-(\\d{2})T00:00:00.000',dt)\n\n return result[0][1]\n\n \n\ndef convert_date_to_month(dt):\n\n result = re.findall(r'\\d{4}-(\\d{2})-(\\d{2})T00:00:00.000',dt)\n\n return result[0][0]\n\n\ndef convert_date_to_year(dt):\n \n result = re.findall(r'(\\d{4})-(\\d{2})-(\\d{2})T00:00:00.000',dt)\n\n return result[0][0]\n\ndef convert_time_to_hour(tm):\n\n result = re.findall(r'(\\d{2}):\\d{2}',tm)\n\n return result[0]\n\n\n# In[4]:\n\n\ndf_district = pd.read_csv('/home/ubuntu/CSVs/CENTRAL_data.csv') #change this city for csv for whatever district being done\ndf_district = df_district.drop(columns=['pddistrict', 'incidntnum', 'pdid', 'location', 'descript'])\ndf_y = df_district['category']\ndf_x = df_district.drop(columns=['category'])\nlabelencoder = LabelEncoder()\nlabelencoder = labelencoder.fit(df_y)\nlabelencoded_y = labelencoder.transform(df_y)\ndf_x['day'] = df_x.date.apply(lambda x: convert_date_to_day(x))\ndf_x['month'] = df_x.date.apply(lambda x: convert_date_to_month(x))\ndf_x['year'] = df_x.date.apply(lambda x: convert_date_to_year(x))\ndf_x['hour'] = df_x.time.apply(lambda x: convert_time_to_hour(x))\ndf_x = df_x.drop(columns=['date', 'time'])\ndf_x['day'] = (df_x['day']).astype(int)\ndf_x['month'] = (df_x['month']).astype(int)\ndf_x['year'] = (df_x['year']).astype(int)\ndf_x['hour'] = (df_x['hour']).astype(int)\nlabel_encoder_addr = LabelEncoder()\naddr_feature = label_encoder_addr.fit_transform(df_x.address.iloc[:].values)\naddr_feature = addr_feature.reshape(df_x.shape[0], 1)\nonehot_encoder_addr = OneHotEncoder(sparse = False)\naddr_feature = onehot_encoder_addr.fit_transform(addr_feature)\nlabel_encoder_DoW = LabelEncoder()\nDoW_feature = label_encoder_DoW.fit_transform(df_x.dayofweek.iloc[:].values)\nDoW_feature = DoW_feature.reshape(df_x.shape[0], 1)\nonehot_encoder_DoW = OneHotEncoder(sparse = False)\nDoW_feature = onehot_encoder_DoW.fit_transform(DoW_feature)\nlabel_encoder_res = LabelEncoder()\nres_feature = label_encoder_res.fit_transform(df_x.resolution.iloc[:].values)\nres_feature = res_feature.reshape(df_x.shape[0], 1)\nonehot_encoder_res = OneHotEncoder(sparse = False)\nres_feature = onehot_encoder_res.fit_transform(res_feature)\n\nday = df_x.day.values\nmonth = df_x.month.values\nyear = df_x.year.values\nhour = df_x.hour.values\nx = df_x.x.values\ny = df_x.y.values\n\ncolumns = []\ncolumns.append(addr_feature)\ncolumns.append(DoW_feature)\ncolumns.append(res_feature)\ncolumns.append(x)\ncolumns.append(y)\ncolumns.append(day)\ncolumns.append(month)\ncolumns.append(year)\ncolumns.append(hour)\nencoded_feats = 
column_stack(columns)\nsparse_features = sparse.csr_matrix(encoded_feats)\n\n\n# In[5]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(sparse_features, labelencoded_y, test_size=0.20, random_state=random_seed)\n\nmodel = XGBClassifier(nthread = n_threads) #or -1\nkfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=random_seed)\nparam_grid = {'n_estimators': [120, 240, 360, 480], #random int btwn 100 and 500 - removed\n 'learning_rate': stats.uniform(0.01, 0.08), #.01 + loc, range of .01+/-.08\n 'max_depth': [2, 4, 6, 8], #tree depths to check\n 'colsample_bytree': stats.uniform(0.3, 0.7) #btwn .1 and 1.0 \n}\nrand_search = RandomizedSearchCV(model, param_distributions = param_grid, scoring = 'f1_micro', n_iter = 3, n_jobs=-1, verbose = 10, cv=kfold)\nrand_result = rand_search.fit(X_train, y_train)\nprint(\"Best: %f using %s\" % (rand_result.best_score_, rand_result.best_params_))\nbest_XGB_parameters = rand_result.best_estimator_\n#INSERT CITY NAME FOR .DAT FILE\npickle.dump(best_XGB_parameters, open(\"xgb_CENTRAL.pickle.dat\", 'wb')) #change pickle\n\n\n# In[6]:\n\n\n#test on test set\nbest_XGB_parameters.fit(X_train, y_train)\npreds = best_XGB_parameters.predict(X_test)\nf1score = f1_score(y_test, preds, average = 'micro')\n#CSV append best score after test set\nf1_score = []\nf1_score.append(('Central', f1score))\nexport_df = pd.DataFrame(f1_score)\n#change csv name\nexport_df.to_csv(\"Central_results.dat\", index = False, header = False)\n\n"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.train_test_split",
"scipy.sparse.csr_matrix",
"sklearn.model_selection.StratifiedKFold",
"pandas.DataFrame",
"sklearn.metrics.f1_score.append",
"scipy.stats.uniform",
"numpy.column_stack",
"sklearn.metrics.f1_score",
"sklearn.preprocessing.LabelEncoder"
]
] |
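A minimal sketch of the same label-encode / one-hot / column_stack / csr_matrix / randomized-search pipeline on synthetic data. The feature names and class labels are invented stand-ins for the SF-crime fields, and any sklearn-compatible classifier could be swapped in for XGBClassifier if xgboost is not installed.

import numpy as np
from numpy import column_stack
from scipy import sparse, stats
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV, train_test_split
from sklearn.metrics import f1_score
from xgboost import XGBClassifier

rng = np.random.default_rng(10)
district = rng.choice(["CENTRAL", "MISSION", "TARAVAL"], size=300)   # categorical feature
hour = rng.integers(0, 24, size=300)                                 # numeric feature
y = LabelEncoder().fit_transform(rng.choice(["THEFT", "ASSAULT"], size=300))

# One-hot encode the categorical column, stack it with the numeric one,
# and store the result as a sparse CSR matrix, as the original script does.
onehot = OneHotEncoder().fit_transform(district.reshape(-1, 1)).toarray()
X = sparse.csr_matrix(column_stack([onehot, hour]))

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=10)
search = RandomizedSearchCV(
    XGBClassifier(),
    param_distributions={"max_depth": [2, 4, 6],
                         "learning_rate": stats.uniform(0.01, 0.08)},
    n_iter=3, scoring="f1_micro",
    cv=StratifiedKFold(n_splits=3, shuffle=True, random_state=10),
)
search.fit(X_tr, y_tr)
print("micro-F1:", f1_score(y_te, search.best_estimator_.predict(X_te), average="micro"))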
SCAuFish/GraphGym
|
[
"85db2957e7aa406ee42ae5260092939fd0f67bab"
] |
[
"graphgym/contrib/layer/idconv.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom torch_scatter import scatter_add\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import add_remaining_self_loops\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n\nfrom torch_geometric.nn.inits import glorot, zeros, reset\n\nfrom graphgym.config import cfg\nfrom graphgym.register import register_layer\n\n\nclass GeneralIDConvLayer(MessagePassing):\n def __init__(self, in_channels, out_channels, improved=False, cached=False,\n bias=True, **kwargs):\n super(GeneralIDConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.improved = improved\n self.cached = cached\n self.normalize = cfg.gnn.normalize_adj\n\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n self.weight_id = Parameter(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n glorot(self.weight_id)\n zeros(self.bias)\n self.cached_result = None\n self.cached_num_edges = None\n\n @staticmethod\n def norm(edge_index, num_nodes, edge_weight=None, improved=False,\n dtype=None):\n if edge_weight is None:\n edge_weight = torch.ones((edge_index.size(1),), dtype=dtype,\n device=edge_index.device)\n\n fill_value = 1.0 if not improved else 2.0\n edge_index, edge_weight = add_remaining_self_loops(\n edge_index, edge_weight, fill_value, num_nodes)\n\n row, col = edge_index\n deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)\n deg_inv_sqrt = deg.pow(-0.5)\n deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n\n return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]\n\n def forward(self, x, edge_index, id, edge_weight=None):\n \"\"\"\"\"\"\n x_id = torch.index_select(x, dim=0, index=id)\n x_id = torch.matmul(x_id, self.weight_id)\n x = torch.matmul(x, self.weight)\n x.index_add_(0, id, x_id)\n\n if self.cached and self.cached_result is not None:\n if edge_index.size(1) != self.cached_num_edges:\n raise RuntimeError(\n 'Cached {} number of edges, but found {}. 
Please '\n 'disable the caching behavior of this layer by removing '\n 'the `cached=True` argument in its constructor.'.format(\n self.cached_num_edges, edge_index.size(1)))\n\n if not self.cached or self.cached_result is None:\n self.cached_num_edges = edge_index.size(1)\n if self.normalize:\n edge_index, norm = self.norm(edge_index, x.size(self.node_dim),\n edge_weight, self.improved,\n x.dtype)\n else:\n norm = edge_weight\n self.cached_result = edge_index, norm\n\n edge_index, norm = self.cached_result\n\n return self.propagate(edge_index, x=x, norm=norm)\n\n def message(self, x_j, norm):\n return norm.view(-1, 1) * x_j if norm is not None else x_j\n\n def update(self, aggr_out):\n if self.bias is not None:\n aggr_out = aggr_out + self.bias\n return aggr_out\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass GCNIDConvLayer(MessagePassing):\n def __init__(self, in_channels, out_channels, improved=False, cached=False,\n bias=True, normalize=True, **kwargs):\n super(GCNIDConvLayer, self).__init__(aggr='add', **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.improved = improved\n self.cached = cached\n self.normalize = normalize\n\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n self.weight_id = Parameter(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n glorot(self.weight_id)\n zeros(self.bias)\n self.cached_result = None\n self.cached_num_edges = None\n\n @staticmethod\n def norm(edge_index, num_nodes, edge_weight=None, improved=False,\n dtype=None):\n if edge_weight is None:\n edge_weight = torch.ones((edge_index.size(1),), dtype=dtype,\n device=edge_index.device)\n\n fill_value = 1.0 if not improved else 2.0\n edge_index, edge_weight = add_remaining_self_loops(\n edge_index, edge_weight, fill_value, num_nodes)\n\n row, col = edge_index\n deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)\n deg_inv_sqrt = deg.pow(-0.5)\n deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n\n return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]\n\n def forward(self, x, edge_index, id, edge_weight=None):\n \"\"\"\"\"\"\n x_id = torch.index_select(x, dim=0, index=id)\n x_id = torch.matmul(x_id, self.weight_id)\n x = torch.matmul(x, self.weight)\n x.index_add_(0, id, x_id)\n\n if self.cached and self.cached_result is not None:\n if edge_index.size(1) != self.cached_num_edges:\n raise RuntimeError(\n 'Cached {} number of edges, but found {}. 
Please '\n 'disable the caching behavior of this layer by removing '\n 'the `cached=True` argument in its constructor.'.format(\n self.cached_num_edges, edge_index.size(1)))\n\n if not self.cached or self.cached_result is None:\n self.cached_num_edges = edge_index.size(1)\n if self.normalize:\n edge_index, norm = self.norm(edge_index, x.size(self.node_dim),\n edge_weight, self.improved,\n x.dtype)\n else:\n norm = edge_weight\n self.cached_result = edge_index, norm\n\n edge_index, norm = self.cached_result\n\n return self.propagate(edge_index, x=x, norm=norm)\n\n def message(self, x_j, norm):\n return norm.view(-1, 1) * x_j if norm is not None else x_j\n\n def update(self, aggr_out):\n if self.bias is not None:\n aggr_out = aggr_out + self.bias\n return aggr_out\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass SAGEIDConvLayer(MessagePassing):\n def __init__(self, in_channels, out_channels, normalize=False,\n concat=False, bias=True, **kwargs):\n super(SAGEIDConvLayer, self).__init__(aggr='mean', **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.normalize = normalize\n self.concat = concat\n\n in_channels = 2 * in_channels if concat else in_channels\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n self.weight_id = Parameter(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n # uniform(self.weight.size(0), self.weight)\n # uniform(self.weight.size(0), self.weight_id)\n # uniform(self.weight.size(0), self.bias)\n glorot(self.weight)\n glorot(self.weight_id)\n zeros(self.bias)\n\n def forward(self, x, edge_index, id, edge_weight=None, size=None,\n res_n_id=None):\n \"\"\"\n Args:\n res_n_id (Tensor, optional): Residual node indices coming from\n :obj:`DataFlow` generated by :obj:`NeighborSampler` are used to\n select central node features in :obj:`x`.\n Required if operating in a bipartite graph and :obj:`concat` is\n :obj:`True`. 
(default: :obj:`None`)\n \"\"\"\n if not self.concat and torch.is_tensor(x):\n edge_index, edge_weight = add_remaining_self_loops(\n edge_index, edge_weight, 1, x.size(self.node_dim))\n\n return self.propagate(edge_index, size=size, x=x,\n edge_weight=edge_weight, res_n_id=res_n_id, id=id)\n\n def message(self, x_j, edge_weight):\n return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j\n\n def update(self, aggr_out, x, res_n_id, id):\n if self.concat and torch.is_tensor(x):\n aggr_out = torch.cat([x, aggr_out], dim=-1)\n elif self.concat and (isinstance(x, tuple) or isinstance(x, list)):\n assert res_n_id is not None\n aggr_out = torch.cat([x[0][res_n_id], aggr_out], dim=-1)\n\n aggr_out_id = torch.index_select(aggr_out, dim=0, index=id)\n aggr_out_id = torch.matmul(aggr_out_id, self.weight_id)\n aggr_out = torch.matmul(aggr_out, self.weight)\n aggr_out.index_add_(0, id, aggr_out_id)\n\n if self.bias is not None:\n aggr_out = aggr_out + self.bias\n\n if self.normalize:\n aggr_out = F.normalize(aggr_out, p=2, dim=-1)\n\n return aggr_out\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\n\nclass GATIDConvLayer(MessagePassing):\n def __init__(self, in_channels, out_channels, heads=1, concat=True,\n negative_slope=0.2, dropout=0, bias=True, **kwargs):\n super(GATIDConvLayer, self).__init__(aggr='add', **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.heads = heads\n self.concat = concat\n self.negative_slope = negative_slope\n self.dropout = dropout\n\n self.weight = Parameter(torch.Tensor(in_channels,\n heads * out_channels))\n self.weight_id = Parameter(torch.Tensor(in_channels,\n heads * out_channels))\n self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels))\n\n if bias and concat:\n self.bias = Parameter(torch.Tensor(heads * out_channels))\n elif bias and not concat:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n glorot(self.weight_id)\n glorot(self.att)\n zeros(self.bias)\n\n def forward(self, x, edge_index, id, size=None):\n \"\"\"\"\"\"\n if size is None and torch.is_tensor(x):\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = add_self_loops(edge_index,\n num_nodes=x.size(self.node_dim))\n\n if torch.is_tensor(x):\n x_id = torch.index_select(x, dim=0, index=id)\n x_id = torch.matmul(x_id, self.weight_id)\n x = torch.matmul(x, self.weight)\n x.index_add_(0, id, x_id)\n else:\n x = (None if x[0] is None else torch.matmul(x[0], self.weight),\n None if x[1] is None else torch.matmul(x[1], self.weight))\n\n return self.propagate(edge_index, size=size, x=x)\n\n def message(self, edge_index_i, x_i, x_j, size_i):\n # Compute attention coefficients.\n x_j = x_j.view(-1, self.heads, self.out_channels)\n if x_i is None:\n alpha = (x_j * self.att[:, :, self.out_channels:]).sum(dim=-1)\n else:\n x_i = x_i.view(-1, self.heads, self.out_channels)\n alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)\n\n alpha = F.leaky_relu(alpha, self.negative_slope)\n alpha = softmax(alpha, edge_index_i, num_nodes=size_i)\n\n # Sample attention coefficients stochastically.\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n\n return x_j * alpha.view(-1, self.heads, 1)\n\n def update(self, aggr_out):\n if self.concat is True:\n aggr_out = aggr_out.view(-1, self.heads * self.out_channels)\n else:\n aggr_out = 
aggr_out.mean(dim=1)\n\n if self.bias is not None:\n aggr_out = aggr_out + self.bias\n return aggr_out\n\n def __repr__(self):\n return '{}({}, {}, heads={})'.format(self.__class__.__name__,\n self.in_channels,\n self.out_channels, self.heads)\n\n\nclass GINIDConvLayer(MessagePassing):\n def __init__(self, nn, nn_id, eps=0, train_eps=False, **kwargs):\n super(GINIDConvLayer, self).__init__(aggr='add', **kwargs)\n self.nn = nn\n self.nn_id = nn_id\n self.initial_eps = eps\n if train_eps:\n self.eps = torch.nn.Parameter(torch.Tensor([eps]))\n else:\n self.register_buffer('eps', torch.Tensor([eps]))\n self.reset_parameters()\n\n def reset_parameters(self):\n reset(self.nn)\n reset(self.nn_id)\n self.eps.data.fill_(self.initial_eps)\n\n def forward(self, x, edge_index, id):\n \"\"\"\"\"\"\n x = x.unsqueeze(-1) if x.dim() == 1 else x\n edge_index, _ = remove_self_loops(edge_index)\n x = (1 + self.eps) * x + self.propagate(edge_index, x=x)\n x_id = torch.index_select(x, dim=0, index=id)\n x_id = self.nn_id(x_id)\n x = self.nn(x)\n x.index_add_(0, id, x_id)\n return x\n\n def message(self, x_j):\n return x_j\n\n def __repr__(self):\n return '{}(nn={})'.format(self.__class__.__name__, self.nn)\n\n\nclass GeneralIDConv(nn.Module):\n def __init__(self, dim_in, dim_out, bias=False, **kwargs):\n super(GeneralIDConv, self).__init__()\n self.model = GeneralIDConvLayer(dim_in, dim_out, bias=bias)\n\n def forward(self, batch):\n batch.node_feature = self.model(batch.node_feature, batch.edge_index,\n batch.node_id_index)\n return batch\n\n\nclass GCNIDConv(nn.Module):\n def __init__(self, dim_in, dim_out, bias=False, **kwargs):\n super(GCNIDConv, self).__init__()\n self.model = GCNIDConvLayer(dim_in, dim_out, bias=bias)\n\n def forward(self, batch):\n batch.node_feature = self.model(batch.node_feature, batch.edge_index,\n batch.node_id_index)\n return batch\n\n\nclass SAGEIDConv(nn.Module):\n def __init__(self, dim_in, dim_out, bias=False, **kwargs):\n super(SAGEIDConv, self).__init__()\n self.model = SAGEIDConvLayer(dim_in, dim_out, bias=bias, concat=True)\n\n def forward(self, batch):\n batch.node_feature = self.model(batch.node_feature, batch.edge_index,\n batch.node_id_index)\n return batch\n\n\nclass GATIDConv(nn.Module):\n def __init__(self, dim_in, dim_out, bias=False, **kwargs):\n super(GATIDConv, self).__init__()\n self.model = GATIDConvLayer(dim_in, dim_out, bias=bias)\n\n def forward(self, batch):\n batch.node_feature = self.model(batch.node_feature, batch.edge_index,\n batch.node_id_index)\n return batch\n\n\nclass GINIDConv(nn.Module):\n def __init__(self, dim_in, dim_out, bias=False, **kwargs):\n super(GINIDConv, self).__init__()\n gin_nn = nn.Sequential(nn.Linear(dim_in, dim_out), nn.ReLU(),\n nn.Linear(dim_out, dim_out))\n gin_nn_id = nn.Sequential(nn.Linear(dim_in, dim_out), nn.ReLU(),\n nn.Linear(dim_out, dim_out))\n self.model = GINIDConvLayer(gin_nn, gin_nn_id)\n\n def forward(self, batch):\n batch.node_feature = self.model(batch.node_feature, batch.edge_index,\n batch.node_id_index)\n return batch\n\n\nregister_layer('idconv', GeneralIDConv)\nregister_layer('gcnidconv', GCNIDConv)\nregister_layer('sageidconv', SAGEIDConv)\nregister_layer('gatidconv', GATIDConv)\nregister_layer('ginidconv', GINIDConv)\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.Tensor",
"torch.nn.functional.dropout",
"torch.cat",
"torch.is_tensor",
"torch.matmul",
"torch.nn.Linear",
"torch.nn.functional.leaky_relu",
"torch.nn.ReLU",
"torch.index_select"
]
] |
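A minimal sketch of the ID-aware weighting trick shared by all of the layers above, stripped of the torch_geometric message passing: rows selected by an index tensor are transformed with a separate weight matrix via torch.index_select and torch.matmul, then written back with index_add_. The dimensions and node_id values are arbitrary illustrations.

import torch
import torch.nn.functional as F
from torch.nn import Parameter

in_dim, out_dim, num_nodes = 8, 4, 6
weight = Parameter(torch.randn(in_dim, out_dim))        # shared weight
weight_id = Parameter(torch.randn(in_dim, out_dim))     # extra weight for "identity" nodes

x = torch.randn(num_nodes, in_dim)                      # node features
node_id = torch.tensor([1, 3])                          # indices of the identity nodes

x_id = torch.index_select(x, dim=0, index=node_id)      # pick the identity rows
x_id = torch.matmul(x_id, weight_id)                    # transform them separately
out = torch.matmul(x, weight)                           # transform all rows
out.index_add_(0, node_id, x_id)                        # add the identity contribution back

out = F.leaky_relu(out, negative_slope=0.2)
out = F.dropout(out, p=0.1, training=True)
out = F.normalize(out, p=2, dim=-1)                     # row-wise L2 normalization
print(out.shape)                                        # torch.Size([6, 4])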
LEO1222/DMAT_test
|
[
"6fee58fc811922bcdc6c8bd380a7f9d1acd4b817"
] |
[
"Aplan.py"
] |
[
"#!/usr/bin/env python3\r\nimport mhi.pscad\r\nimport logging\r\nimport mhi.pscad.handler\r\nimport os, openpyxl\r\nimport pandas as pd\r\n\r\n\r\nclass BuildEventHandler(mhi.pscad.handler.BuildEvent):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self._start = {}\r\n\r\n def _build_event(self, phase, status, project, elapsed, **kwargs):\r\n\r\n key = (project, phase)\r\n if status == 'BEGIN':\r\n self._start[key] = elapsed\r\n else:\r\n sec = elapsed - self._start[key]\r\n name = project if project else '[All]'\r\n LOG.info(\"%s %s: %.3f sec\", name, phase, sec)\r\n\r\n\r\n# Log 'INFO' messages & above. Include level & module name.\r\nlogging.basicConfig(level=logging.INFO,\r\n format=\"%(levelname)-8s %(name)-26s %(message)s\")\r\n\r\n# Ignore INFO msgs from automation (eg, mhi.pscad, mhi.pscad.pscad, ...)\r\nlogging.getLogger('mhi.pscad').setLevel(logging.WARNING)\r\n\r\nLOG = logging.getLogger('main')\r\n\r\nversions = mhi.pscad.versions()\r\nLOG.info(\"PSCAD Versions: %s\", versions)\r\n\r\n# Skip any 'Alpha' versions, if other choices exist\r\nvers = [(ver, x64) for ver, x64 in versions if ver != 'Alpha']\r\nif len(vers) > 0:\r\n versions = vers\r\n\r\n# Skip any 'Beta' versions, if other choices exist\r\nvers = [(ver, x64) for ver, x64 in versions if ver != 'Beta']\r\nif len(vers) > 0:\r\n versions = vers\r\n\r\n# Skip any 32-bit versions, if other choices exist\r\nvers = [(ver, x64) for ver, x64 in versions if x64]\r\nif len(vers) > 0:\r\n versions = vers\r\n\r\nLOG.info(\" After filtering: %s\", versions)\r\n\r\n# Of any remaining versions, choose the \"lexically largest\" one.\r\nversion, x64 = sorted(versions)[-1]\r\nLOG.info(\" Selected PSCAD version: %s %d-bit\", version, 64 if x64 else 32)\r\n\r\n# # Get all installed FORTRAN compiler versions\r\n# fortrans = mhi.pscad.fortran_versions()\r\n# LOG.info(\"FORTRAN Versions: %s\", fortrans)\r\n\r\n# # Skip 'GFortran' compilers, if other choices exist\r\n# vers = [ver for ver in fortrans if 'GFortran' not in ver]\r\n# if len(vers) > 0:\r\n# fortrans = vers\r\n\r\n# LOG.info(\" After filtering: %s\", fortrans)\r\n\r\n# # Order the remaining compilers, choose the last one (highest revision)\r\n# fortran = sorted(fortrans)[-1]\r\n# LOG.info(\" Selected FORTRAN version: %s\", fortran)\r\n\r\n# Get all installed Matlab versions\r\nmatlabs = mhi.pscad.matlab_versions()\r\nLOG.info(\"Matlab Versions: %s\", matlabs)\r\n\r\n# Get the highest installed version of Matlab:\r\nmatlab = sorted(matlabs)[-1] if matlabs else ''\r\nLOG.info(\" Selected Matlab version: %s\", matlab)\r\n\r\n# Launch PSCAD\r\nLOG.info(\"Launching: %s FORTRAN=%r Matlab=%r\",\r\n version, 'Not Available', matlab)\r\npscad = mhi.pscad.launch(minimize=False, version=version, x64=x64)\r\n\r\nif pscad:\r\n\r\n try:\r\n\r\n # Load only the pscx project file\r\n pscad.load(r\"C:\\Users\\Niu2021\\Desktop\\integration\\tests_integration.pscx\")\r\n\r\n # Get the list of simulation sets\r\n sim_sets = pscad.simulation_sets()\r\n if len(sim_sets) > 0:\r\n LOG.info(\"Simulation sets: %s\", sim_sets)\r\n\r\n # For each simulation set ...\r\n for sim_set_name in sim_sets:\r\n # ... 
run it\r\n LOG.info(\"Running simulation set '%s'\", sim_set_name)\r\n sim_set = pscad.simulation_set(sim_set_name)\r\n sim_set.run()\r\n LOG.info(\"Simulation set '%s' complete\", sim_set_name)\r\n else:\r\n # Run project\r\n \r\n FaultTest = pscad.project(\"tests_integration\")\r\n\r\n\r\n ############################120_Large_Disturbance_Test#########################\r\n ############################# Determine excel file path ##################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'large_disturbance120.xlsx')\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ############################################################################################\r\n \r\n #generate variable list\r\n list_Duration = (data_value[0])[1:] #variable 1\r\n list_FaultType = (data_value[1])[1:]\r\n list_P = (data_value[6])[1:]\r\n list_Q = (data_value[7])[1:]\r\n list_Rs = (data_value[4])[1:]\r\n list_Xs = (data_value[5])[1:]\r\n list_Rf = (data_value[2])[1:]\r\n list_Xf = (data_value[3])[1:] #variable 8\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n \r\n # Use canvas controller to find components by name\r\n Duration = canvas.find(\"master:const\", \"Duration_Setting\")\r\n FaultType = canvas.find(\"master:const\", \"FaultType_Setting\")\r\n Q = canvas1.find(\"master:const\", \"Qref_DMAT\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n Rf = canvas.find(\"master:const\", \"Rfault\")\r\n Xf = canvas.find(\"master:const\", \"Xfault\")\r\n\r\n\r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n 
figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Enabled\"\r\n\r\n # Run each case \r\n for index in range(len(list_Rs)): \r\n # Change variables each cycle\r\n Duration.parameters(Name=\"Duration_Setting\", Value=list_Duration[index])\r\n FaultType.parameters(Name=\"FaultType_Setting\", Value=list_FaultType[index])\r\n Q.parameters(Name=\"Qref_DMAT\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n Rf.parameters(Name=\"Rfault\", Value=list_Rf[index])\r\n Xf.parameters(Name=\"Xfault\", Value=list_Xf[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"large_disturbance120_test{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ############################## Muyuan_summerTerm #######################\r\n ############################## Determine excel file path ###############################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'muyuan_part.xlsx')\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n ## DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n \r\n #generate variable list\r\n lst_switch1 = (data_value[0])[1:] #variable 1\r\n lst_switch2 = (data_value[1])[1:]\r\n lst_switch3 = (data_value[2])[1:]\r\n lst_switch4 = (data_value[3])[1:]\r\n lst_switch5 = (data_value[4])[1:]\r\n lst_switch6 = (data_value[5])[1:]\r\n lst_Rgrid = (data_value[6])[1:]\r\n lst_Xgrid = (data_value[7])[1:] #variable 8\r\n\r\n # Select the specific component\r\n canvas0 = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n canvas2 = FaultTest.canvas(\"Machin_Side_Ctrl\") # get the controller of Machine side controller canvas\r\n canvas3 = FaultTest.canvas(\"WindTurbine_Mechanical\") #get the controller of WindTurbine controller canvas\r\n\r\n # Use canvas controller to find components by name\r\n switch1 = canvas1.find(\"master:const\", \"switch1\")\r\n switch2 = canvas1.find(\"master:const\", \"switch2\")\r\n switch3 = canvas2.find(\"master:const\", \"switch3\") #in grid-side controller\r\n switch4 = canvas2.find(\"master:const\", \"switch4\")\r\n switch5 = canvas3.find(\"master:const\", \"switch5\")\r\n switch6 = canvas3.find(\"master:const\", \"switch6\")\r\n Rgrid = canvas0.find(\"master:const\", \"Rgrid\")\r\n Xgrid = canvas0.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = 
FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Enabled\"\r\n NonMuyuan.state = \"Disabled\"\r\n large1to120.state = \"Disabled\"\r\n \r\n # Run each case \r\n for index in range(len(lst_switch1)):\r\n # Change variables each cycle\r\n switch1.parameters(Name=\"switch1\", Value=lst_switch1[index])\r\n switch2.parameters(Name=\"switch2\", Value=lst_switch2[index])\r\n switch3.parameters(Name=\"switch3\", Value=lst_switch3[index])\r\n switch4.parameters(Name=\"switch4\", Value=lst_switch4[index])\r\n switch5.parameters(Name=\"switch5\", Value=lst_switch5[index])\r\n switch6.parameters(Name=\"switch6\", Value=lst_switch6[index])\r\n Rgrid.parameters(Name=\"Rgrid\", Value=lst_Rgrid[index])\r\n Xgrid.parameters(Name=\"Xgrid\", Value=lst_Xgrid[index])\r\n \r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"summer_term_muyuan_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n \r\n ######################### Yuxiang 206FRT #################################################\r\n ############################# Determine excel file path ##################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'yuxiang_206_225FRT.xlsx')\r\n print('The xlsFile path is:',xlsPath)\r\n print('-'*60)\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ############################################################################################\r\n #generate variable list\r\n list_Duration = 
(data_value[0])[1:] #variable 1\r\n list_FaultType = (data_value[1])[1:]\r\n list_P = (data_value[6])[1:]\r\n list_Q = (data_value[7])[1:]\r\n list_Rs = (data_value[4])[1:]\r\n list_Xs = (data_value[5])[1:]\r\n list_Rf = (data_value[2])[1:]\r\n list_Xf = (data_value[3])[1:] #variable 8\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n \r\n # Use canvas controller to find components by name\r\n Duration = canvas.find(\"master:const\", \"Duration_Setting\")\r\n FaultType = canvas.find(\"master:const\", \"FaultType_Setting\")\r\n Q = canvas1.find(\"master:const\", \"Qref_DMAT\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n Rf = canvas.find(\"master:const\", \"Rfault\")\r\n Xf = canvas.find(\"master:const\", \"Xfault\")\r\n\r\n\r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Enabled\"\r\n\r\n # Run each case \r\n for index in range(len(list_Rs)): \r\n # Change variables each cycle\r\n Duration.parameters(Name=\"Duration_Setting\", Value=list_Duration[index])\r\n FaultType.parameters(Name=\"FaultType_Setting\", Value=list_FaultType[index])\r\n Q.parameters(Name=\"Qref_DMAT\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", 
Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n Rf.parameters(Name=\"Rfault\", Value=list_Rf[index])\r\n Xf.parameters(Name=\"Xfault\", Value=list_Xf[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"yuxiang_206FRT_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ########################################## Yuxiang Part ###############################################################\r\n ######################################### Determine excel file path ##############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'yuxiang_part.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ####################################################################################################################\r\n #generate variable list\r\n list_Duration = (data_value[0])[1:]\r\n list_FaultType = (data_value[1])[1:]\r\n list_P = (data_value[6])[1:]\r\n list_Q = (data_value[7])[1:]\r\n list_Rs = (data_value[4])[1:]\r\n list_Xs = (data_value[5])[1:]\r\n list_Rf = (data_value[2])[1:]\r\n list_Xf = (data_value[3])[1:] \r\n list_Rs_post=(data_value[8])[1:]\r\n list_Xs_post = (data_value[9])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Duration = canvas.find(\"master:const\", \"Duration_Setting\")\r\n FaultType = canvas.find(\"master:const\", \"FaultType_Setting\")\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n Rf = canvas.find(\"master:const\", \"Rfault\")\r\n Xf = canvas.find(\"master:const\", \"Xfault\")\r\n Rs_post = canvas.find(\"master:const\", \"Rgrid_post\")\r\n Xs_post = canvas.find(\"master:const\", \"Xgrid_post\")\r\n \r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n 
large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Enabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n \r\n # Run each case\r\n for index in range(len(list_Rs)): \r\n # Change variables each cycle\r\n Duration.parameters(Name=\"Duration_Setting\", Value=list_Duration[index])\r\n FaultType.parameters(Name=\"FaultType_Setting\", Value=list_FaultType[index])\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n Rf.parameters(Name=\"Rfault\", Value=list_Rf[index])\r\n Xf.parameters(Name=\"Xfault\", Value=list_Xf[index])\r\n Rs_post.parameters(Name=\"Rgrid_post\", Value=list_Rs_post[index])\r\n Xs_post.parameters(Name=\"Xgrid_post\", Value=list_Xs_post[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"yuxiang_summer_term_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ################################## Ziheng 170Blue #######################################\r\n ######################################## Determine excel file path ########################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test170-173.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ########################################################################################################################\r\n \r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = 
FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Enabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n\r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_170blue_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output) \r\n\r\n ######################## Ziheng 170 grey ############################################ \r\n ############################################### Determine excel file path ###############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test170-173.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ##############################################################################################################################\r\n \r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in 
grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Enabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n\r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"t{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_170grey_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n \r\n ################################# Ziheng 170 orange ############################################\r\n ############################################# Determine excel file path #############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test170-173.xlsx')\r\n print('The xlsFile path is:',xlsPath)\r\n print('-'*60)\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default 
read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #####################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Enabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case \r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_170orange_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = 
FaultTest.output()\r\n print(output)\r\n \r\n ##################################### Ziheng 170 yellow ###############################################################\r\n ######################################### Determine excel file path #############################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test170-173.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #################################################################################################################################\r\n \r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Enabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", 
Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_yellow_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n \r\n ############################################ Ziheng 174 blue ############################################\r\n ################################################# Determine excel file path #################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test174-177.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #################################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = 
\"Enabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n\r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_174blue_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n\r\n ########################################## Ziheng 174 yellow ############################################\r\n ################################################ Determine excel file path ################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test174-177.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ##############################################################################################################################\r\n \r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n large1to120.state = 
\"Disabled\"\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Enabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\" \r\n \r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n\r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_174yellow_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n\r\n ##################################### Ziheng 186 Blue #######################################\r\n ######################################## Determine excel file path ####################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test186-189.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ########################################################################################################################\r\n \r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = 
FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Enabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_186blue_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ######################################## Ziheng 186 green #######################################\r\n ################################################## Determine excel file path ################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test186-189.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ###############################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n 
figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Enabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_186green_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output) \r\n\r\n ########################################## Ziheng 186 red ############################################\r\n ################################################ Determine excel file path ################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test186-189.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #############################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n\r\n \r\n # Use canvas controller 
to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Enabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n # Rf.parameters(Name=\"Rfault\", Value=list_Rf[index])\r\n # Xf.parameters(Name=\"Xfault\", Value=list_Xf[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_186red_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n\r\n ############################################ Ziheng 190-192 #######################################\r\n ################################################## Determine excel file path ################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test190-192.xlsx')\r\n\r\n ## Read data in excel\r\n df = 
pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ###############################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n list_Frequency = (data_value[4])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n Frequency= canvas.find(\"master:const\", \"Oscillatory Frequ\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Enabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n Frequency.parameters(Name=\"Oscillatory Frequ\", Value=list_Frequency[index])\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", 
output_filename=f\"ziheng_190_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ############################################ 193_198_40 #################################################################\r\n ############################################## Determine excel file path ################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test193-198.xlsx')\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ###########################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Enabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n 
table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_193_+40_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ################################## Ziheng 193_198_60 ######################################\r\n ############################################## Determine excel file path #####################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test193-198.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #################################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n 
figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Enabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n\r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_193_+60_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ############################ Ziheng 193 198_-40 #################################\r\n ############################################################ Determine excel file path##############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test193-198.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #####################################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = 
FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Enabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_193_-40_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output) \r\n \r\n \r\n \r\n ############################ Ziheng 193 198_-60 #################################\r\n ############################################################ Determine excel file path##############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test193-198.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #####################################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = 
FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Enabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_193_-60_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ################################# Ziheng 155-160 #####################################\r\n ################################################# Determine excel file path #############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test155-160.xlsx')\r\n\r\n #Read data in excel \r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n ##########################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", 
\"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Enabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_155_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ################################################### Ziheng 178-181 ########################################\r\n ############################################################### Determine excel file path #############################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test178-181.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #######################################################################################################################################\r\n 
\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n # Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Enabled\"\r\n figure9.state = \"Disabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n \r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_178_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n ######################################## Ziheng 182-185 
###########################################\r\n ######################################################## Determine excel file path ###################################################\r\n path = r'C:\\Users\\Niu2021\\Desktop\\integration\\input_data'\r\n xlsPath = os.path.join(path,'test182-185.xlsx')\r\n\r\n ## Read data in excel\r\n df = pd.read_excel(xlsPath) # default read the firt sheet in Excel and save as a DataFrame\r\n\r\n # DataFrame -----> Python List\r\n data_value = df.values.tolist()\r\n #######################################################################################################################################\r\n #generate variable list\r\n list_P = (data_value[2])[1:]\r\n list_Q = (data_value[3])[1:]\r\n list_Rs = (data_value[0])[1:]\r\n list_Xs = (data_value[1])[1:]\r\n\r\n\r\n # Select the specific component\r\n canvas = FaultTest.canvas(\"Main\") # get the controller of main canvas\r\n canvas1 = FaultTest.canvas(\"Grid_Side_Ctrl\") # get the controller of grid side controller canvas\r\n \r\n # Use canvas controller to find components by name\r\n Q = canvas.find(\"master:const\", \"Q_Setting\") #in grid-side controller\r\n P = canvas.find(\"master:const\", \"P_Setting\")\r\n Rs = canvas.find(\"master:const\", \"Rgrid\")\r\n Xs = canvas.find(\"master:const\", \"Xgrid\")\r\n\r\n \r\n # Select the layer (enabled/disabled)\r\n TOVLay = FaultTest.layer(\"TOV_layer\")\r\n figure23 = FaultTest.layer(\"figure23\")\r\n figure8 = FaultTest.layer(\"figure8\")\r\n figure9 = FaultTest.layer(\"figure9\")\r\n figure10green = FaultTest.layer(\"figure10green\")\r\n figure10blue = FaultTest.layer(\"figure10blue\")\r\n figure10red = FaultTest.layer(\"figure10red\")\r\n figure111hz = FaultTest.layer(\"figure111hz\")\r\n figure1110hz = FaultTest.layer(\"figure1110hz\")\r\n figure6blue = FaultTest.layer(\"figure6blue\")\r\n figure6orange = FaultTest.layer(\"figure6orange\")\r\n figure6grey = FaultTest.layer(\"figure6grey\")\r\n figure6yellow = FaultTest.layer(\"figure6yellow\")\r\n figure7blue = FaultTest.layer(\"figure7blue\")\r\n figure7yellow = FaultTest.layer(\"figure7yellow\")\r\n table1340 = FaultTest.layer(\"table1340\")\r\n table13minus40 = FaultTest.layer(\"tableminus1340\")\r\n table1360 = FaultTest.layer(\"table1360\")\r\n table13minus60 = FaultTest.layer(\"tableminus1360\")\r\n Yuxiangtest = FaultTest.layer(\"Yuxiangtest\")\r\n Muyuantest = FaultTest.layer(\"Muyuantest\")\r\n NonMuyuan = FaultTest.layer(\"NonMuyuan\")\r\n large1to120 = FaultTest.layer(\"large1to120\")\r\n\r\n #Layer Settings\r\n TOVLay.state = \"Disabled\"\r\n figure23.state = \"Disabled\"\r\n figure8.state = \"Disabled\"\r\n figure9.state = \"Enabled\"\r\n figure10green.state = \"Disabled\"\r\n figure10blue.state = \"Disabled\"\r\n figure10red.state = \"Disabled\" \r\n figure111hz.state = \"Disabled\" \r\n figure1110hz.state = \"Disabled\" \r\n figure6blue.state = \"Disabled\" \r\n figure6orange.state = \"Disabled\" \r\n figure6grey.state = \"Disabled\" \r\n figure6yellow.state = \"Disabled\" \r\n figure7blue.state = \"Disabled\" \r\n figure7yellow.state = \"Disabled\"\r\n table1340.state = \"Disabled\"\r\n table13minus40.state = \"Disabled\"\r\n table1360.state = \"Disabled\"\r\n table13minus60.state = \"Disabled\"\r\n Yuxiangtest.state = \"Disabled\"\r\n Muyuantest.state = \"Disabled\"\r\n NonMuyuan.state = \"Enabled\"\r\n large1to120.state = \"Disabled\"\r\n\r\n # Run each case (168)\r\n for index in range(len(list_Rs)):\r\n \r\n # Change variables each cycle\r\n Q.parameters(Name=\"Q_Setting\", 
Value=list_Q[index])\r\n P.parameters(Name=\"P_Setting\", Value=list_P[index])\r\n Rs.parameters(Name=\"Rgrid\", Value=list_Rs[index])\r\n Xs.parameters(Name=\"Xgrid\", Value=list_Xs[index])\r\n\r\n\r\n # Saving the output file\r\n FaultTest.parameters(PlotType=\"1\", output_filename=f\"ziheng_182_{index+1}.out\")\r\n FaultTest.run()\r\n\r\n\r\n messages = FaultTest.messages()\r\n for msg in messages:\r\n print(\"%s %s %s\" % (msg.scope, msg.status, msg.text))\r\n\r\n print(\"-\"*60)\r\n output = FaultTest.output()\r\n print(output)\r\n\r\n finally:\r\n # Exit PSCAD\r\n pscad.quit()\r\n\r\nelse:\r\n LOG.error(\"Failed to launch PSCAD\")"
] |
[
[
"pandas.read_excel"
]
] |