Columns: repo_name (string, length 6-130), hexsha (list), file_path (list), code (list), apis (list)
aiidateam/acwf-verification-scripts
[ "15e4625fa30e7c9f6742a4846682141e6b79ea15" ]
[ "3-analyze/analysis-scripts/formation-energies/plot_histo_formation_energies.py" ]
[ "#!/usr/bin/env python\nimport json\nimport os\nimport sys\n\nimport numpy as np\nimport pylab as pl\nfrom scipy.optimize import curve_fit\n\nBINS = 100\nPRINT_THRESHOLD = 0.01 # eV/atom\nVERBOSE = True\n\nOUT_FOLDER = 'formation-energies-output'\n\ndef gaussian(x, a, x0, sigma):\n return a * np.exp(-(x - x0)**2 / (2 * sigma**2))\n\n\ndef get_dissimilarities(plugin1, plugin2, what):\n\n fname = f'formation-energies-{plugin1}-VS-{plugin2}.json'\n try:\n with open(f'{OUT_FOLDER}/{fname}') as fhandle:\n raw_data = json.load(fhandle)\n except OSError:\n print(f\"No file '{fname}' found in the '{OUT_FOLDER}' subfolder! Run ./compute_formation_energies.py first.\")\n sys.exit(1)\n\n dissimilarities = []\n missing_count = 0\n\n if what == 'formation-energy':\n formation_data = raw_data['formation_energies']\n for system, data in formation_data.items():\n # System is something like \"Ac-X2O|X2O3|XO3\"\n plugin1_formation_energy = data[plugin1]\n plugin2_formation_energy = data[plugin2]\n if plugin1_formation_energy is None or plugin2_formation_energy is None:\n # Deal with missing data for at least one plugin\n if VERBOSE:\n print(f\"WARNING: missing {system}\")\n missing_count += 1 \n continue\n dissimilarities.append((plugin2_formation_energy - plugin1_formation_energy, system, plugin1_formation_energy, plugin2_formation_energy))\n elif what == 'unaries':\n unaries_data = raw_data['unaries_energy_difference']\n for element, data in unaries_data.items():\n reference_configuration = data['reference_configuration']\n for configuration, plugin_data in data['configurations'].items():\n if configuration == reference_configuration:\n # Avoid to store zeros for the reference configuration, that would\n # bias the final plot\n continue\n plugin1_energy_difference = plugin_data[plugin1]\n plugin2_energy_difference = plugin_data[plugin2]\n if plugin1_energy_difference is None or plugin2_energy_difference is None:\n if VERBOSE:\n print(f\"WARNING: missing {element} - {configuration}\")\n # Deal with missing data for at least one plugin\n missing_count += 1\n continue\n dissimilarities.append(\n (plugin2_energy_difference - plugin1_energy_difference,\n f\"{element}-{configuration}-wrt-{reference_configuration}\",\n plugin1_energy_difference,\n plugin2_energy_difference))\n else:\n raise ValueError(f\"Unknown value of 'what': '{what}'\")\n\n if missing_count:\n print(f\"WARNING: {missing_count} systems missing when checking data for '{what}'\")\n\n return dissimilarities\n\ndef generate_plots(plugin1, plugin2, what, x_zoom_factor=1., abs_x_range=None):\n\n # x_zoom_factor: Adapt this factor to change the zoom on the x axis\n # The default zoom is obtained from the standard deviation of the data\n # This is a multiplicative number; a number > 1 means zoom in, a number < 1 means zoom out\n\n # if abs_x_range is passed, x_zoom_factor is ignored and data is plotted in the range\n # [-abs_x_range, +abs_x_range]\n\n\n # Plotting\n fig = pl.figure(figsize=(18,6))\n\n TINY_SIZE = 18\n SMALL_SIZE = 20\n MEDIUM_SIZE = 24\n BIGGER_SIZE = 28\n\n pl.rc('font', size=SMALL_SIZE)# controls default text sizes\n pl.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title\n pl.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n pl.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n pl.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n pl.rc('legend', fontsize=TINY_SIZE) # legend fontsize\n pl.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n 
dissimilarities = get_dissimilarities(plugin1, plugin2, what=what)\n\n flat_data = np.array([_[0] for _ in dissimilarities])\n \n if abs_x_range is None:\n half_range = np.sqrt(np.mean(np.array(flat_data)**2)) / x_zoom_factor\n else:\n half_range = abs_x_range\n\n still_on_left = len(flat_data[flat_data < -half_range])\n still_on_right = len(flat_data[flat_data > half_range])\n\n hist_y, bins, patches = pl.hist(\n flat_data, bins=BINS, range=[-half_range, half_range],\n label=f\"{still_on_left} more on the left, {still_on_right} more on the right\"\n )\n\n # Fit Gaussian and plot it\n hist_x = (bins[1:] + bins[:-1])/2\n popt, pcov = curve_fit(gaussian, hist_x, hist_y, p0=[10., 0., 0.001])\n x = np.linspace(pl.xlim()[0], pl.xlim()[1], 1000)\n sigma = abs(popt[2])\n ## NOTES ON THE RELATION BETWEEN THE SIGMA OF THE GAUSSIAN AND THE FWHM\n # np.exp(-HWHM**2/(2*sigma**2)) = 1/2\n # -HWHM**2/(2*sigma**2) = ln(1/2)\n # HWHM**2/(2*sigma**2) = ln(2)\n # HWHM**2 = ln(2) * (2*sigma**2)\n # HWHM = sqrt(ln(2)) * sqrt(2) * sigma\n # FWHM = 2*HWHM = 2*sqrt(2)*sqrt(ln(2)) * sigma\n pl.plot(x, gaussian(x, *popt), 'r:', label=rf'Gaussian fit (FWHM = {2*np.sqrt(2)*np.sqrt(np.log(2))*sigma:.5f})')\n pl.axvline(popt[1], color='r', linestyle=':')\n # Reset the xlim\n pl.xlim(x[0], x[-1])\n\n pl.legend(loc='upper right')\n pl.xlabel(\"Formation energy dissimilarity (eV/atom)\")\n pl.ylabel(\"Frequency\")\n pl.title(f\"{plugin1} VS {plugin2} ({what})\")\n pl.xlim(-half_range, half_range)\n pl.tight_layout()\n pl.savefig(f\"{OUT_FOLDER}/histogram-{what}-{plugin1}-VS-{plugin2}.png\")\n pl.close(fig)\n\n abs_dissimilarities = [\n (abs(_[0]), _[1], _[2], _[3]) for _ in dissimilarities]\n \n # Sort in-place from the largest dissimilarity (in abs value)\n abs_dissimilarities.sort()\n abs_dissimilarities.reverse()\n\n with open(f\"{OUT_FOLDER}/discrepancies-{what}-{plugin1}-VS-{plugin2}.txt\", \"w\") as fhandle:\n fhandle.write(f\"## {plugin1} VS {plugin2}\\n\")\n if what == 'formation-energy':\n fhandle.write(f\"## Cases with abs(formation energies) > {PRINT_THRESHOLD} eV/atom:\\n\")\n elif what == 'unaries':\n fhandle.write(f\"## Cases with abs(energy difference) > {PRINT_THRESHOLD} eV/atom:\\n\")\n else:\n raise ValueError(f\"Unknown value of 'what': '{what}'\")\n for dissimilarity, system, data_plugin1, data_plugin2 in abs_dissimilarities:\n if dissimilarity < PRINT_THRESHOLD:\n # Here I assume I already sorted them\n break\n fhandle.write(f\"{system:30s}: {dissimilarity:.6f} ({data_plugin1} vs {data_plugin2})\\n\")\n\nif __name__ == \"__main__\":\n try:\n plugin1_name = sys.argv[1]\n plugin2_name = sys.argv[2]\n what = sys.argv[3]\n zoom_value = sys.argv[4:5]\n if zoom_value == []:\n zoom_value = 1.\n else:\n zoom_value = zoom_value[0]\n except IndexError:\n print(\"Pass as two parameters the two plugins to compare, a third parameter with 'what' to compare, and a fourth (optional) parameter as the zoom factor\")\n sys.exit(1)\n try:\n zoom_value = float(zoom_value)\n except IndexError:\n print(\"The zoom factor must be a float number\")\n sys.exit(1)\n\n generate_plots(plugin1_name, plugin2_name, what, zoom_value)\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.array", "numpy.exp", "scipy.optimize.curve_fit" ] ]
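The apis column above records the NumPy/SciPy calls this row's script relies on; the core pattern in plot_histo_formation_energies.py is histogramming the formation-energy dissimilarities, fitting a Gaussian to the histogram with scipy.optimize.curve_fit, and reporting the FWHM derived from the fitted sigma. Below is a minimal, plotting-free sketch of that pattern; the synthetic input array, random seed, and printed labels are assumptions for illustration only, not values from the dataset row.

```python
# Sketch of the histogram + Gaussian-fit step, assuming synthetic input data.
import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, a, x0, sigma):
    return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

rng = np.random.default_rng(0)
# Hypothetical dissimilarities in eV/atom, standing in for the script's real data.
dissimilarities = rng.normal(loc=0.0, scale=0.002, size=500)

hist_y, bin_edges = np.histogram(dissimilarities, bins=100)
hist_x = (bin_edges[1:] + bin_edges[:-1]) / 2   # bin centres

popt, pcov = curve_fit(gaussian, hist_x, hist_y, p0=[10., 0., 0.001])
sigma = abs(popt[2])
fwhm = 2 * np.sqrt(2 * np.log(2)) * sigma       # FWHM of a Gaussian from its sigma
print(f"centre = {popt[1]:.5f}, FWHM = {fwhm:.5f}")
```

The FWHM factor 2*sqrt(2*ln 2) is the same relation derived in the comment block of the original script; the sketch simply uses numpy.histogram in place of pylab.hist so it runs without a plotting backend.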
cliulinnaeus/exatrkx-neurips19_tf2
[ "ee2f22ad786b870818f88e25ec1b72ebc0b6e969" ]
[ "gnn-tracking/heptrkx/postprocess/pathfinder.py" ]
[ "\"\"\"\nLoop over all hits;\nfor each hit, find next hit that has maximum weight among all available edge candidates\n\"\"\"\nimport numpy as np\nimport networkx as nx\n\nfrom .utils_fit import poly_fit, poly_val\n\ndef get_tracks(graph, weights, hit_ids, weight_cutoff):\n hits_in_tracks = []\n hits_idx_in_tracks = []\n all_tracks = []\n\n n_hits = graph.X.shape[0]\n for idx in range(n_hits):\n # Loop over all hits\n # and save hits that are used in a track\n hit_id = hit_ids[idx]\n if hit_id not in hits_in_tracks:\n hits_in_tracks.append(hit_id)\n hits_idx_in_tracks.append(idx)\n else:\n continue\n\n a_track = [hit_id]\n while(True):\n # for this hit index (idx),\n # find its outgoing hits that could form a track\n hit_out = graph.Ro[idx]\n if hit_out.nonzero()[0].shape[0] < 1:\n break\n weighted_outgoing = np.argsort((hit_out * weights))\n if weights[weighted_outgoing[-1]] < weight_cutoff:\n break\n ii = -1\n has_next_hit = False\n while abs(ii) < 15:\n weight_idx = weighted_outgoing[ii]\n next_hit = graph.Ri[:, weight_idx].nonzero()\n if next_hit[0].shape[0] > 0:\n next_hit_id = next_hit[0][0]\n if next_hit_id != idx and next_hit_id not in hits_idx_in_tracks:\n hits_in_tracks.append(hit_ids[next_hit_id])\n hits_idx_in_tracks.append(next_hit_id)\n a_track .append(hit_ids[next_hit_id])\n idx = next_hit_id\n has_next_hit = True\n break\n ii -= 1\n\n if not has_next_hit:\n # no more out-going tracks\n break\n all_tracks.append(a_track)\n return all_tracks\n\n\ndef get_tracks2(G, th=0.5, feature_name='solution'):\n used_nodes = []\n sub_graphs = []\n for node in G.nodes():\n if node in used_nodes:\n continue\n a_track = longest_track(\n G, node,\n used_nodes, th=th, feature_name=feature_name)\n if len(a_track) < 1:\n used_nodes.append(node)\n continue\n\n sub = nx.edge_subgraph(G, a_track)\n sub_graphs.append(sub)\n used_nodes += list(sub.nodes())\n\n n_tracks = len(sub_graphs)\n print(\"total tracks:\", n_tracks)\n return sub_graphs\n" ]
[ [ "numpy.argsort" ] ]
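The only API recorded for this row is numpy.argsort, which get_tracks in pathfinder.py uses to rank one hit's outgoing edge candidates by weight and then walk them from strongest to weakest until it finds a usable next hit. A short, self-contained sketch of that selection step follows; the toy mask, weights, and cutoff are hypothetical and not taken from the dataset row.

```python
# Sketch of the edge-selection step, assuming toy inputs:
# mask the per-edge weights with the hit's outgoing-edge row, then use
# numpy.argsort to visit candidates from highest to lowest weight.
import numpy as np

hit_out = np.array([0, 1, 0, 1, 1])            # hypothetical outgoing-edge mask for one hit
weights = np.array([0.9, 0.2, 0.7, 0.8, 0.4])  # hypothetical per-edge scores
weight_cutoff = 0.5

weighted_outgoing = np.argsort(hit_out * weights)  # ascending; best candidate is last
for edge_idx in weighted_outgoing[::-1]:           # walk candidates best to worst
    if weights[edge_idx] < weight_cutoff:
        break                                      # remaining candidates fall below threshold
    if hit_out[edge_idx]:                          # only follow genuine outgoing edges
        print(f"follow edge {edge_idx} with weight {weights[edge_idx]:.2f}")
        break
```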
jpuigcerver/tensorflow
[ "231ca9dd4e258b898cc76a283a90050fd17ee69a" ]
[ "tensorflow/python/ops/variable_scope.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A class to store named variables and a scope operator to manage sharing.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as collections_lib\nimport copy\nimport enum # pylint: disable=g-bad-import-order\nimport functools\nimport traceback\n\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.estimator import util as estimator_util\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\n\n__all__ = [\"AUTO_REUSE\", \"VariableScope\", \"get_variable_scope\",\n \"get_variable\", \"get_local_variable\", \"variable_scope\",\n \"variable_op_scope\", \"no_regularizer\"]\n\n\nclass _PartitionInfo(object):\n \"\"\"Holds partition info used by initializer functions.\n \"\"\"\n\n def __init__(self, full_shape, var_offset):\n \"\"\"Constructor.\n\n Args:\n full_shape: Tuple or list of `int` indicating the full combined shape\n of the partitioned variables.\n var_offset: Tuple or list of `int` specifying offset of this partition\n with respect to the full variable for each dimension.\n\n Raises:\n TypeError: If `full_shape` or `var_offset` is not a sequence.\n ValueError: If `full_shape` or `var_offset` differ in length. 
If\n `var_offset` exceeds `full_shape` in any dimension.\n \"\"\"\n if not isinstance(full_shape, collections_lib.Sequence) or isinstance(\n full_shape, six.string_types):\n raise TypeError(\n \"`full_shape` must be a sequence (like tuple or list) instead of \" +\n type(full_shape).__name__)\n\n if not isinstance(var_offset, collections_lib.Sequence) or isinstance(\n var_offset, six.string_types):\n raise TypeError(\n \"`var_offset` must be a sequence (like tuple or list) instead of \" +\n type(var_offset).__name__)\n\n if len(var_offset) != len(full_shape):\n raise ValueError(\n \"Expected equal length, but `var_offset` is of length {} while \"\n \"full_shape is of length {}.\".format(\n len(var_offset), len(full_shape)))\n\n for i in xrange(len(full_shape)):\n offset = var_offset[i]\n shape = full_shape[i]\n if offset < 0 or offset >= shape:\n raise ValueError(\n \"Expected 0 <= offset < shape but found offset={}, shape={} for \"\n \"var_offset={}, full_shape={}\".format(offset, shape, var_offset,\n full_shape))\n\n self._full_shape = full_shape\n self._var_offset = var_offset\n\n @property\n def full_shape(self):\n return self._full_shape\n\n @property\n def var_offset(self):\n return self._var_offset\n\n def single_offset(self, shape):\n \"\"\"Returns the offset when the variable is partitioned in at most one dim.\n\n Args:\n shape: Tuple or list of `int` indicating the shape of one specific\n variable partition.\n\n Returns:\n `int` representing the offset in the dimension along which the variable is\n partitioned. Returns 0 if the variable is not being partitioned.\n\n Raises:\n ValueError: Depending on self.single_slice_dim().\n \"\"\"\n\n single_slice_dim = self.single_slice_dim(shape)\n # If this variable is not being partitioned at all, single_slice_dim() could\n # return None.\n if single_slice_dim is None:\n return 0\n return self.var_offset[single_slice_dim]\n\n def single_slice_dim(self, shape):\n \"\"\"Returns the slice dim when the variable is partitioned only in one dim.\n\n Args:\n shape: Tuple or list of `int` indicating the shape of one specific\n variable partition.\n\n Returns:\n `int` representing the dimension that the variable is partitioned in, or\n `None` if the variable doesn't seem to be partitioned at all.\n\n Raises:\n TypeError: If `shape` is not a sequence.\n ValueError: If `shape` is not the same length as `self.full_shape`. 
If\n the variable is partitioned in more than one dimension.\n \"\"\"\n if not isinstance(shape, collections_lib.Sequence) or isinstance(\n shape, six.string_types):\n raise TypeError(\n \"`shape` must be a sequence (like tuple or list) instead of \" +\n type(shape).__name__)\n\n if len(shape) != len(self.full_shape):\n raise ValueError(\n \"Expected equal length, but received shape={} of length {} while \"\n \"self.full_shape={} is of length {}.\".format(shape, len(\n shape), self.full_shape, len(self.full_shape)))\n\n for i in xrange(len(shape)):\n if self.var_offset[i] + shape[i] > self.full_shape[i]:\n raise ValueError(\n \"With self.var_offset={}, a partition of shape={} would exceed \"\n \"self.full_shape={} in dimension {}.\".format(\n self.var_offset, shape, self.full_shape, i))\n\n slice_dim = None\n for i in xrange(len(shape)):\n if shape[i] == self.full_shape[i]:\n continue\n if slice_dim is not None:\n raise ValueError(\n \"Cannot use single_slice_dim() with shape={} and \"\n \"self.full_shape={} since slice dim could be either dimension {} \"\n \"or {}.\".format(shape, self.full_shape, i, slice_dim))\n slice_dim = i\n\n return slice_dim\n\n\nclass _ReuseMode(enum.Enum):\n \"\"\"Mode for variable access within a variable scope.\"\"\"\n\n # Indicates that variables are to be fetched if they already exist or\n # otherwise created.\n AUTO_REUSE = 1\n\n # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of\n # enum values.\n # REUSE_FALSE = 2\n # REUSE_TRUE = 3\n\nAUTO_REUSE = _ReuseMode.AUTO_REUSE\nAUTO_REUSE.__doc__ = \"\"\"\nWhen passed in as the value for the `reuse` flag, AUTO_REUSE indicates that\nget_variable() should create the requested variable if it doesn't exist or, if\nit does exist, simply return it.\n\"\"\"\n\n\nclass _VariableStore(object):\n \"\"\"Variable store that carries a number of named Variables.\n\n New variable names and new variables can be created; all stored\n variables are initialized with the initializer passed to __init__.\n\n Attributes:\n vars: a dictionary with string names (same as passed in GetVar) as keys\n and the corresponding TensorFlow Variables as values.\n \"\"\"\n\n def __init__(self):\n \"\"\"Create a variable store.\"\"\"\n self._vars = {} # A dictionary of the stored TensorFlow variables.\n self._partitioned_vars = {} # A dict of the stored PartitionedVariables.\n self.variable_scopes_count = {} # Count re-used variable scopes.\n\n def open_variable_scope(self, scope_name):\n if scope_name in self.variable_scopes_count:\n self.variable_scopes_count[scope_name] += 1\n else:\n self.variable_scopes_count[scope_name] = 1\n\n def close_variable_subscopes(self, scope_name):\n for k in self.variable_scopes_count:\n if not scope_name or k.startswith(scope_name + \"/\"):\n self.variable_scopes_count[k] = 0\n\n def variable_scope_count(self, scope_name):\n return self.variable_scopes_count.get(scope_name, 0)\n\n def get_variable(self, name, shape=None, dtype=dtypes.float32,\n initializer=None, regularizer=None, reuse=None,\n trainable=True, collections=None, caching_device=None,\n partitioner=None, validate_shape=True, use_resource=None,\n custom_getter=None, constraint=None):\n \"\"\"Gets an existing variable with these parameters or create a new one.\n\n If a variable with the given name is already stored, we return the stored\n variable. 
Otherwise, we create a new one.\n\n Set `reuse` to `True` when you only want to reuse existing Variables.\n Set `reuse` to `False` when you only want to create new Variables.\n Set `reuse` to None (the default) or tf.AUTO_REUSE when you want\n variables to be created if they don't exist or returned if they do.\n\n If initializer is `None` (the default), the default initializer passed in\n the constructor is used. If that one is `None` too, we use a new\n `glorot_uniform_initializer`. If initializer is a Tensor, we use\n it as a value and derive the shape from the initializer.\n\n If a partitioner is provided, a `PartitionedVariable` is returned.\n Accessing this object as a `Tensor` returns the shards concatenated along\n the partition axis.\n\n Some useful partitioners are available. See, e.g.,\n `variable_axis_size_partitioner` and `min_max_variable_partitioner`.\n\n Args:\n name: The name of the new or existing variable.\n shape: Shape of the new or existing variable.\n dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).\n initializer: Initializer for the variable.\n regularizer: A (Tensor -> Tensor or None) function; the result of\n applying it on a newly created variable will be added to the collection\n GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.\n reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation\n of variables. When eager execution is enabled this argument is always\n forced to be False.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n collections: List of graph collections keys to add the `Variable` to.\n Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the `Variable` reside, to\n deduplicate copying through `Switch` and other conditional statements.\n partitioner: Optional callable that accepts a fully defined `TensorShape`\n and dtype of the `Variable` to be created, and returns a list of\n partitions for each axis (currently only one axis can be partitioned).\n validate_shape: If False, allows the variable to be initialized with a\n value of unknown shape. If True, the default, the shape of initial_value\n must be known.\n use_resource: If False, creates a regular Variable. If True, creates\n instead an experimental ResourceVariable which has well-defined\n semantics. Defaults to False (will later change to True).\n When eager execution is enabled this argument is always forced to be\n true.\n custom_getter: Callable that takes as a first argument the true getter,\n and allows overwriting the internal get_variable method.\n The signature of `custom_getter` should match that of this method,\n but the most future-proof version will allow for changes:\n `def custom_getter(getter, *args, **kwargs)`. Direct access to\n all `get_variable` parameters is also allowed:\n `def custom_getter(getter, name, *args, **kwargs)`. A simple identity\n custom getter that simply creates variables with modified names is:\n ```python\n def custom_getter(getter, name, *args, **kwargs):\n return getter(name + '_suffix', *args, **kwargs)\n ```\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. 
used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Returns:\n The created or existing `Variable` (or `PartitionedVariable`, if a\n partitioner was used).\n\n Raises:\n ValueError: when creating a new variable and shape is not declared,\n when reusing a variable and specifying a conflicting shape,\n or when violating reuse during variable creation.\n \"\"\"\n if custom_getter is not None and not callable(custom_getter):\n raise ValueError(\n \"Passed a custom_getter which is not callable: %s\" % custom_getter)\n\n if context.in_eager_mode():\n reuse = False\n use_resource = True\n\n # If a *_ref type is passed in an error would be triggered further down the\n # stack. We prevent this using base_dtype to get a non-ref version of the\n # type, before doing anything else. When _ref types are removed in favor of\n # resources, this line can be removed.\n try:\n dtype = dtype.base_dtype\n except AttributeError:\n # .base_dtype not existing means that we will try and use the raw dtype\n # which was passed in - this might be a NumPy type which is valid.\n pass\n\n # This is the main logic of get_variable. However, custom_getter\n # may override this logic. So we save it as a callable and pass\n # it to custom_getter.\n # Note: the parameters of _true_getter, and their documentation, match\n # *exactly* item-for-item with the docstring of this method.\n def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring\n initializer=None, regularizer=None, reuse=None,\n trainable=True, collections=None, caching_device=None,\n partitioner=None, validate_shape=True, use_resource=None,\n constraint=None):\n is_scalar = (shape is not None\n and isinstance(shape, collections_lib.Sequence)\n and not shape)\n # Partitioned variable case\n if partitioner is not None and not is_scalar:\n if not callable(partitioner):\n raise ValueError(\n \"Partitioner must be callable, but received: %s\" % partitioner)\n with ops.name_scope(None):\n return self._get_partitioned_variable(name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n regularizer=regularizer,\n reuse=reuse,\n trainable=trainable,\n collections=collections,\n caching_device=caching_device,\n partitioner=partitioner,\n validate_shape=validate_shape,\n use_resource=use_resource,\n constraint=constraint)\n\n # Special case for partitioned variable to allow reuse without having to\n # specify partitioner.\n if (reuse is True and partitioner is None\n and name in self._partitioned_vars):\n return self._get_partitioned_variable(name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n regularizer=regularizer,\n reuse=reuse,\n trainable=trainable,\n collections=collections,\n caching_device=caching_device,\n partitioner=None,\n validate_shape=validate_shape,\n use_resource=use_resource,\n constraint=constraint)\n\n # Single variable case\n if \"%s/part_0\" % name in self._vars:\n raise ValueError(\n \"No partitioner was provided, but a partitioned version of the \"\n \"variable was found: %s/part_0. 
Perhaps a variable of the same \"\n \"name was already created with partitioning?\" % name)\n\n return self._get_single_variable(\n name=name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer, reuse=reuse,\n trainable=trainable, collections=collections,\n caching_device=caching_device, validate_shape=validate_shape,\n use_resource=use_resource, constraint=constraint)\n\n if custom_getter is not None:\n # Handle backwards compatibility with getter arguments that were added\n # to the API after users started writing custom getters.\n custom_getter_kwargs = {\n \"getter\": _true_getter,\n \"name\": name,\n \"shape\": shape,\n \"dtype\": dtype,\n \"initializer\": initializer,\n \"regularizer\": regularizer,\n \"reuse\": reuse,\n \"trainable\": trainable,\n \"collections\": collections,\n \"caching_device\": caching_device,\n \"partitioner\": partitioner,\n \"validate_shape\": validate_shape,\n \"use_resource\": use_resource,\n }\n # `fn_args` can handle functions, `functools.partial`, `lambda`.\n if \"constraint\" in estimator_util.fn_args(custom_getter):\n custom_getter_kwargs[\"constraint\"] = constraint\n return custom_getter(**custom_getter_kwargs)\n else:\n return _true_getter(\n name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer,\n reuse=reuse, trainable=trainable, collections=collections,\n caching_device=caching_device, partitioner=partitioner,\n validate_shape=validate_shape, use_resource=use_resource,\n constraint=constraint)\n\n def _get_partitioned_variable(\n self, name, partitioner, shape=None, dtype=dtypes.float32,\n initializer=None, regularizer=None, reuse=None,\n trainable=True, collections=None, caching_device=None,\n validate_shape=True, use_resource=None, constraint=None):\n \"\"\"Gets or creates a sharded variable list with these parameters.\n\n The `partitioner` must be a callable that accepts a fully defined\n `TensorShape` and returns a sequence of integers (the `partitions`).\n These integers describe how to partition the given sharded `Variable`\n along the given dimension. That is, `partitions[1] = 3` means split\n the `Variable` into 3 shards along dimension 1. Currently, sharding along\n only one axis is supported.\n\n If the list of variables with the given name (prefix) is already stored,\n we return the stored variables. Otherwise, we create a new one.\n\n Set `reuse` to `True` when you only want to reuse existing Variables.\n Set `reuse` to `False` when you only want to create new Variables.\n Set `reuse` to None (the default) or tf.AUTO_REUSE when you want\n variables to be created if they don't exist or returned if they do.\n\n If initializer is `None` (the default), the default initializer passed in\n the constructor is used. If that one is `None` too, we use a new\n `glorot_uniform_initializer`. If initializer is a Tensor, we use\n it as a value and derive the shape from the initializer.\n\n If the initializer is a callable, then it will be called for each\n shard. Otherwise the initializer should match the shape of the entire\n sharded Variable, and it will be sliced accordingly for each shard.\n\n Some useful partitioners are available. 
See, e.g.,\n `variable_axis_size_partitioner` and `min_max_variable_partitioner`.\n\n Args:\n name: the name of the new or existing sharded variable.\n partitioner: Optional callable that accepts a fully defined `TensorShape`\n and `dtype` of the Variable to be created, and returns a list of\n partitions for each axis (currently only one axis can be partitioned).\n shape: shape of the new or existing sharded variable.\n dtype: type of the new or existing sharded variable\n (defaults to `DT_FLOAT`).\n initializer: initializer for the sharded variable.\n regularizer: a (Tensor -> Tensor or None) function; the result of\n applying it on a newly created variable will be added to the collection\n GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.\n reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation\n of variables.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n collections: List of graph collections keys to add the Variable to.\n Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n validate_shape: If False, allows the variable to be initialized with a\n value of unknown shape. If True, the default, the shape of initial_value\n must be known.\n use_resource: If False, creates a regular Variable. If True, creates an\n experimental ResourceVariable which has well-defined semantics. Defaults\n to False (will later change to True).\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Returns:\n A `PartitionedVariable` object.\n\n Raises:\n ValueError: when creating a new variable and shape is not declared,\n when reusing a variable and specifying a conflicting shape,\n when violating reuse during variable creation, or if an existing\n sharded variable exists for the given name but with different sharding.\n \"\"\"\n if context.in_eager_mode():\n raise NotImplementedError(\"Partitioned variables are not yet supported \"\n \"when eager execution is enabled.\")\n\n initializing_from_value = initializer is not None and isinstance(\n initializer, ops.Tensor)\n reuse_without_partition = reuse and not partitioner\n\n if name in self._vars:\n raise ValueError(\n \"A partitioner was provided, but an unpartitioned version of the \"\n \"variable was found: %s. 
Perhaps a variable of the same name was \"\n \"already created without partitioning?\" % name)\n\n shape = tensor_shape.as_shape(shape)\n if initializing_from_value:\n shape = shape.merge_with(initializer.get_shape())\n\n if not reuse_without_partition:\n if not shape.is_fully_defined():\n raise ValueError(\"Shape of a new partitioned variable (%s) must be \"\n \"fully defined, but instead was %s.\" % (name, shape))\n\n if shape.ndims < 1:\n raise ValueError(\"A partitioned Variable must have rank at least 1, \"\n \"shape: %s\" % shape)\n\n partitions = partitioner(shape=shape, dtype=dtype)\n\n if not isinstance(partitions, collections_lib.Sequence):\n raise ValueError(\"Partitioner must return a sequence, but saw: %s\"\n % partitions)\n\n if len(partitions) != shape.ndims:\n raise ValueError(\n \"Partitioner returned a partition list that does not match the \"\n \"Variable's rank: %s vs. %s\" % (partitions, shape))\n\n if any([p < 1 for p in partitions]):\n raise ValueError(\n \"Partitioner returned zero partitions for some axes: %s\" %\n partitions)\n\n if name in self._partitioned_vars:\n if reuse is False:\n raise ValueError(\n \"Partitioned variable with name %s already exists. Did you mean to \"\n \"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?\"\n % name)\n\n existing_var = self._partitioned_vars[name]\n if not shape.is_compatible_with(existing_var.get_shape()):\n raise ValueError(\n \"Trying to reuse partitioned variable %s, but specified shape %s \"\n \"and found shape %s.\"\n % (name, shape, existing_var.get_shape()))\n if not dtype.is_compatible_with(existing_var.dtype):\n raise ValueError(\n \"Trying to reuse partitioned variable %s, but specified dtype %s \"\n \"and found dtype %s.\"\n % (name, dtype.name, existing_var.dtype.name))\n\n # pylint: disable=protected-access\n if (not reuse_without_partition and\n existing_var._get_partitions() != partitions):\n raise ValueError(\n \"Trying to reuse partitioned variable %s, but specified partitions \"\n \"%s and found partitions %s.\" %\n (name, partitions, existing_var._get_partitions()))\n # pylint: enable=protected-access\n\n return existing_var\n\n if reuse is True:\n raise ValueError(\"PartitionedVariable %s does not exist, or was not \"\n \"created with tf.get_variable(). Did you mean to set \"\n \"reuse=None in VarScope?\" % name)\n\n slice_dim, slice_shape = _compute_slice_dim_and_shape(\n shape.as_list(), partitions)\n\n vs = []\n num_slices = partitions[slice_dim]\n num_slices_with_excess = shape[slice_dim].value % num_slices\n\n slice_offset = [0] * shape.ndims\n\n if \"%s/part_0\" % name in self._vars:\n if \"%s/part_%d\" % (name, num_slices - 1) not in self._vars:\n raise ValueError(\n \"Partitioner returned a different partitioning than what was \"\n \"already found. Partitioner returned %d shards, and shard \"\n \"%s/part_0 was found, but %s/part_%d was not.\"\n % (num_slices, name, name, num_slices - 1))\n if \"%s/part_%d\" % (name, num_slices) in self._vars:\n raise ValueError(\n \"Partitioner returned a different partitioning than what was \"\n \"already found. 
Partitioner returned %d shards, and shard \"\n \"%s/part_0 was found, but so was the extra shard %s/part_%d.\"\n % (num_slices, name, name, num_slices))\n\n for i in xrange(num_slices):\n var_shape = slice_shape[:]\n var_offset = slice_offset[:]\n partition_info = _PartitionInfo(\n full_shape=shape.as_list(), var_offset=var_offset)\n if i < num_slices_with_excess:\n var_shape[slice_dim] += 1\n slice_offset[slice_dim] += var_shape[slice_dim]\n\n var_full_name = \"%s/part_%d\" % (name, i)\n with ops.name_scope(var_full_name + \"/PartitionedInitializer\"):\n # Create the tensor to initialize the variable with default value.\n if initializer is None:\n init, initializing_from_value = self._get_default_initializer(\n name=name, shape=shape, dtype=dtype)\n if initializing_from_value:\n init_shape = None\n else:\n init_shape = var_shape\n elif callable(initializer):\n init = initializer\n init_shape = var_shape\n elif isinstance(initializer, ops.Tensor):\n init = array_ops.slice(initializer, var_offset, var_shape)\n # Use the dtype of the given tensor.\n dtype = init.dtype.base_dtype\n init_shape = None\n else:\n init = ops.convert_to_tensor(initializer, dtype=dtype)\n init = array_ops.slice(init, var_offset, var_shape)\n init_shape = None\n\n with ops.name_scope(None):\n var = self._get_single_variable(\n name=var_full_name,\n shape=init_shape,\n dtype=dtype,\n initializer=init,\n partition_info=partition_info,\n regularizer=regularizer,\n reuse=reuse,\n trainable=trainable,\n collections=collections,\n caching_device=caching_device,\n validate_shape=validate_shape,\n use_resource=use_resource,\n constraint=constraint)\n\n # pylint: disable=protected-access\n var._set_save_slice_info(variables.Variable.SaveSliceInfo(\n name, shape.as_list(), var_offset, var_shape))\n vs.append(var)\n # pylint: enable=protected-access\n\n # pylint: disable=protected-access\n partitioned_var = variables.PartitionedVariable(name=name,\n shape=shape,\n dtype=dtype,\n variable_list=vs,\n partitions=partitions)\n # pylint: enable=protected-access\n\n self._partitioned_vars[name] = partitioned_var\n return partitioned_var\n\n def _get_single_variable(self,\n name,\n shape=None,\n dtype=dtypes.float32,\n initializer=None,\n regularizer=None,\n partition_info=None,\n reuse=None,\n trainable=True,\n collections=None,\n caching_device=None,\n validate_shape=True,\n use_resource=None,\n constraint=None):\n \"\"\"Get or create a single Variable (e.g. a shard or entire variable).\n\n See the documentation of get_variable above (ignore partitioning components)\n for details.\n\n Args:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n initializer: see get_variable.\n regularizer: see get_variable.\n partition_info: _PartitionInfo object.\n reuse: see get_variable.\n trainable: see get_variable.\n collections: see get_variable.\n caching_device: see get_variable.\n validate_shape: see get_variable.\n use_resource: see get_variable.\n constraint: see get_variable.\n\n Returns:\n A Variable. 
See documentation of get_variable above.\n\n Raises:\n ValueError: See documentation of get_variable above.\n \"\"\"\n # Set to true if initializer is a constant.\n initializing_from_value = False\n if initializer is not None and not callable(initializer):\n initializing_from_value = True\n if shape is not None and initializing_from_value:\n raise ValueError(\"If initializer is a constant, do not specify shape.\")\n\n dtype = dtypes.as_dtype(dtype)\n shape = tensor_shape.as_shape(shape)\n\n if name in self._vars:\n # Here we handle the case when returning an existing variable.\n if reuse is False:\n tb = self._vars[name].op.traceback[::-1]\n # Throw away internal tf entries and only take a few lines.\n tb = [x for x in tb if \"tensorflow/python\" not in x[0]][:3]\n raise ValueError(\"Variable %s already exists, disallowed.\"\n \" Did you mean to set reuse=True or \"\n \"reuse=tf.AUTO_REUSE in VarScope? \"\n \"Originally defined at:\\n\\n%s\" % (\n name, \"\".join(traceback.format_list(tb))))\n found_var = self._vars[name]\n if not shape.is_compatible_with(found_var.get_shape()):\n raise ValueError(\"Trying to share variable %s, but specified shape %s\"\n \" and found shape %s.\" % (name, shape,\n found_var.get_shape()))\n if not dtype.is_compatible_with(found_var.dtype):\n dtype_str = dtype.name\n found_type_str = found_var.dtype.name\n raise ValueError(\"Trying to share variable %s, but specified dtype %s\"\n \" and found dtype %s.\" % (name, dtype_str,\n found_type_str))\n return found_var\n\n # The code below handles only the case of creating a new variable.\n if reuse is True:\n raise ValueError(\"Variable %s does not exist, or was not created with \"\n \"tf.get_variable(). Did you mean to set \"\n \"reuse=tf.AUTO_REUSE in VarScope?\" % name)\n if not shape.is_fully_defined() and not initializing_from_value:\n raise ValueError(\"Shape of a new variable (%s) must be fully defined, \"\n \"but instead was %s.\" % (name, shape))\n\n # Create the tensor to initialize the variable with default value.\n if initializer is None:\n initializer, initializing_from_value = self._get_default_initializer(\n name=name, shape=shape, dtype=dtype)\n # Clear control dependencies while creating the initializer.\n with ops.control_dependencies(None):\n if initializing_from_value:\n init_val = initializer\n variable_dtype = None\n else:\n # Instantiate initializer if provided initializer is a type object.\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n init_val = lambda: initializer( # pylint: disable=g-long-lambda\n shape.as_list(), dtype=dtype, partition_info=partition_info)\n variable_dtype = dtype.base_dtype\n\n # Create the variable.\n if use_resource is None:\n # Set the default value if unspecified.\n use_resource = False\n if use_resource:\n v = resource_variable_ops.ResourceVariable(\n initial_value=init_val,\n name=name,\n trainable=trainable,\n collections=collections,\n caching_device=caching_device,\n dtype=variable_dtype,\n validate_shape=validate_shape,\n constraint=constraint)\n else:\n v = variables.Variable(\n initial_value=init_val,\n name=name,\n trainable=trainable,\n collections=collections,\n caching_device=caching_device,\n dtype=variable_dtype,\n validate_shape=validate_shape,\n constraint=constraint)\n if context.in_graph_mode():\n # In eager mode we do not want to keep default references to Variable\n # objects as this will prevent their memory from being released.\n self._vars[name] = v\n logging.vlog(1, \"Created variable %s 
with shape %s and init %s\", v.name,\n format(shape), initializer)\n\n # Run the regularizer if requested and save the resulting loss.\n if regularizer:\n with ops.colocate_with(v):\n with ops.name_scope(name + \"/Regularizer/\"):\n loss = regularizer(v)\n if loss is not None:\n if context.in_graph_mode():\n v_name = v.name\n loss_name = loss.name\n else:\n v_name = \"v_%s\" % type(v)\n loss_name = \"loss_%s\" % type(loss)\n logging.vlog(1, \"Applied regularizer to %s and added the result %s \"\n \"to REGULARIZATION_LOSSES.\", v_name, loss_name)\n ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)\n return v\n\n # Initialize variable when no initializer provided\n def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n \"\"\"Provide a default initializer and a corresponding value.\n\n Args:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n\n Returns:\n initializer and initializing_from_value. See get_variable above.\n\n Raises:\n ValueError: When giving unsupported dtype.\n \"\"\"\n # If dtype is DT_FLOAT, provide a uniform unit scaling initializer\n if dtype.is_floating:\n initializer = init_ops.glorot_uniform_initializer()\n initializing_from_value = False\n # If dtype is DT_INT/DT_UINT, provide a default value `zero`\n # If dtype is DT_BOOL, provide a default value `FALSE`\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:\n initializer = init_ops.zeros_initializer()(\n shape=shape, dtype=dtype.base_dtype)\n initializing_from_value = True\n # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?\n else:\n raise ValueError(\"An initializer for variable %s of %s is required\"\n % (name, dtype.base_dtype))\n\n return initializer, initializing_from_value\n\n\n# To stop regularization, use this regularizer\ndef no_regularizer(_):\n \"\"\"Use this function to prevent regularization of variables.\"\"\"\n return None\n\n\n# TODO(alive): support caching devices and partitioned variables in Eager mode.\nclass VariableScope(object):\n \"\"\"Variable scope object to carry defaults to provide to `get_variable`.\n\n Many of the arguments we need for `get_variable` in a variable store are most\n easily handled with a context. This object is used for the defaults.\n\n Attributes:\n name: name of the current scope, used as prefix in get_variable.\n initializer: default initializer passed to get_variable.\n regularizer: default regularizer passed to get_variable.\n reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in\n get_variable. When eager execution is enabled this argument is always\n forced to be False.\n caching_device: string, callable, or None: the caching device passed to\n get_variable.\n partitioner: callable or `None`: the partitioner passed to `get_variable`.\n custom_getter: default custom getter passed to get_variable.\n name_scope: The name passed to `tf.name_scope`.\n dtype: default type passed to get_variable (defaults to DT_FLOAT).\n use_resource: if False, create a normal Variable; if True create an\n experimental ResourceVariable with well-defined semantics. Defaults\n to False (will later change to True). When eager execution is enabled\n this argument is always forced to be True.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). 
The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n \"\"\"\n\n def __init__(self,\n reuse,\n name=\"\",\n initializer=None,\n regularizer=None,\n caching_device=None,\n partitioner=None,\n custom_getter=None,\n name_scope=\"\",\n dtype=dtypes.float32,\n use_resource=None,\n constraint=None):\n \"\"\"Creates a new VariableScope with the given properties.\"\"\"\n self._name = name\n self._initializer = initializer\n self._regularizer = regularizer\n self._reuse = reuse\n self._caching_device = caching_device\n self._partitioner = partitioner\n self._custom_getter = custom_getter\n self._name_scope = name_scope\n self._dtype = dtype\n self._use_resource = use_resource\n self._constraint = constraint\n if context.in_eager_mode():\n if self._caching_device is not None:\n raise NotImplementedError(\"Caching devices is not yet supported \"\n \"when eager execution is enabled.\")\n if self._partitioner is not None:\n raise NotImplementedError(\"Partitioned variables are not yet supported \"\n \"when eager execution is enabled.\")\n self._reuse = AUTO_REUSE\n self._use_resource = True\n\n @property\n def name(self):\n return self._name\n\n @property\n def original_name_scope(self):\n return self._name_scope\n\n @property\n def reuse(self):\n return self._reuse\n\n @property\n def initializer(self):\n return self._initializer\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def use_resource(self):\n return self._use_resource\n\n @property\n def regularizer(self):\n return self._regularizer\n\n @property\n def caching_device(self):\n return self._caching_device\n\n @property\n def partitioner(self):\n return self._partitioner\n\n @property\n def custom_getter(self):\n return self._custom_getter\n\n @property\n def constraint(self):\n return self._constraint\n\n def reuse_variables(self):\n \"\"\"Reuse variables in this scope.\"\"\"\n self._reuse = True\n\n def set_initializer(self, initializer):\n \"\"\"Set initializer for this scope.\"\"\"\n self._initializer = initializer\n\n def set_dtype(self, dtype):\n \"\"\"Set data type for this scope.\"\"\"\n self._dtype = dtype\n\n def set_use_resource(self, use_resource):\n \"\"\"Sets whether to use ResourceVariables for this scope.\"\"\"\n if context.in_eager_mode() and not use_resource:\n raise ValueError(\"When eager execution is enabled, \"\n \"use_resource cannot be set to false.\")\n self._use_resource = use_resource\n\n def set_regularizer(self, regularizer):\n \"\"\"Set regularizer for this scope.\"\"\"\n self._regularizer = regularizer\n\n def set_caching_device(self, caching_device):\n \"\"\"Set caching_device for this scope.\"\"\"\n if context.in_eager_mode():\n raise NotImplementedError(\"Caching devices are not yet supported \"\n \"when eager execution is enabled.\")\n self._caching_device = caching_device\n\n def set_partitioner(self, partitioner):\n \"\"\"Set partitioner for this scope.\"\"\"\n if partitioner and context.in_eager_mode():\n raise NotImplementedError(\"Partitioned variables are not yet supported \"\n \"when eager execution is enabled.\")\n self._partitioner = partitioner\n\n def set_custom_getter(self, custom_getter):\n \"\"\"Set custom getter for this scope.\"\"\"\n self._custom_getter = custom_getter\n\n def get_collection(self, name):\n \"\"\"Get this scope's variables.\"\"\"\n scope = self._name + 
\"/\" if self._name else \"\"\n return ops.get_collection(name, scope)\n\n def trainable_variables(self):\n \"\"\"Get this scope's trainable variables.\"\"\"\n return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)\n\n def global_variables(self):\n \"\"\"Get this scope's global variables.\"\"\"\n return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)\n\n def local_variables(self):\n \"\"\"Get this scope's local variables.\"\"\"\n return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)\n\n def get_variable(self,\n var_store,\n name,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n reuse=None,\n trainable=True,\n collections=None,\n caching_device=None,\n partitioner=None,\n validate_shape=True,\n use_resource=None,\n custom_getter=None,\n constraint=None):\n \"\"\"Gets an existing variable with this name or create a new one.\"\"\"\n if regularizer is None:\n regularizer = self._regularizer\n if caching_device is None:\n caching_device = self._caching_device\n if partitioner is None:\n partitioner = self._partitioner\n if custom_getter is None:\n custom_getter = self._custom_getter\n if context.in_graph_mode():\n if reuse is None:\n reuse = self._reuse\n if use_resource is None:\n use_resource = self._use_resource\n else:\n reuse = False\n use_resource = True\n\n full_name = self.name + \"/\" + name if self.name else name\n # Variable names only depend on variable_scope (full_name here),\n # not name_scope, so we reset it below for the time of variable creation.\n with ops.name_scope(None):\n # Check that `initializer` dtype and `dtype` are consistent before\n # replacing them with defaults.\n if (dtype is not None and initializer is not None and\n not callable(initializer)):\n init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype\n if init_dtype != dtype:\n raise ValueError(\"Initializer type '%s' and explicit dtype '%s' \"\n \"don't match.\" % (init_dtype, dtype))\n if initializer is None:\n initializer = self._initializer\n if constraint is None:\n constraint = self._constraint\n if dtype is None:\n dtype = self._dtype\n return var_store.get_variable(\n full_name, shape=shape, dtype=dtype, initializer=initializer,\n regularizer=regularizer, reuse=reuse, trainable=trainable,\n collections=collections, caching_device=caching_device,\n partitioner=partitioner, validate_shape=validate_shape,\n use_resource=use_resource, custom_getter=custom_getter,\n constraint=constraint)\n\n def _get_partitioned_variable(self,\n var_store,\n name,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n collections=None,\n caching_device=None,\n partitioner=None,\n validate_shape=True,\n use_resource=None,\n constraint=None):\n \"\"\"Gets an existing variable with this name or create a new one.\"\"\"\n if context.in_eager_mode():\n raise NotImplementedError(\"Partitioned variables are not yet supported \"\n \"when eager execution is enabled.\")\n if initializer is None:\n initializer = self._initializer\n if regularizer is None:\n regularizer = self._regularizer\n if constraint is None:\n constraint = self._constraint\n if caching_device is None:\n caching_device = self._caching_device\n if partitioner is None:\n partitioner = self._partitioner\n if dtype is None:\n dtype = self._dtype\n if use_resource is None:\n use_resource = self._use_resource\n\n if self._custom_getter is not None:\n raise ValueError(\n \"Private access to _get_partitioned_variable is not allowed when \"\n \"a custom getter is set. Current custom getter: %s. 
\"\n \"It is likely that you're using create_partitioned_variables. \"\n \"If so, consider instead using get_variable with a non-empty \"\n \"partitioner parameter instead.\" % self._custom_getter)\n\n if partitioner is None:\n raise ValueError(\"No partitioner was specified\")\n\n # This allows the variable scope name to be used as the variable name if\n # this function is invoked with an empty name arg, for backward\n # compatibility with create_partitioned_variables().\n full_name_list = []\n if self.name:\n full_name_list.append(self.name)\n if name:\n full_name_list.append(name)\n full_name = \"/\".join(full_name_list)\n\n # Variable names only depend on variable_scope (full_name here),\n # not name_scope, so we reset it below for the time of variable creation.\n with ops.name_scope(None):\n # pylint: disable=protected-access\n return var_store._get_partitioned_variable(\n full_name, shape=shape, dtype=dtype, initializer=initializer,\n regularizer=regularizer, reuse=self.reuse, trainable=trainable,\n collections=collections, caching_device=caching_device,\n partitioner=partitioner, validate_shape=validate_shape,\n use_resource=use_resource, constraint=constraint)\n # pylint: enable=protected-access\n\n\n_VARSTORE_KEY = (\"__variable_store\",)\n_VARSCOPE_KEY = (\"__varscope\",)\n\n\ndef get_variable_scope():\n \"\"\"Returns the current variable scope.\"\"\"\n scope = ops.get_collection(_VARSCOPE_KEY)\n if scope: # This collection has at most 1 element, the default scope at [0].\n return scope[0]\n scope = VariableScope(False)\n ops.add_to_collection(_VARSCOPE_KEY, scope)\n return scope\n\n\ndef _get_default_variable_store():\n store = ops.get_collection(_VARSTORE_KEY)\n if store:\n return store[0]\n store = _VariableStore()\n ops.add_to_collection(_VARSTORE_KEY, store)\n return store\n\n\ndef get_variable(name,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n collections=None,\n caching_device=None,\n partitioner=None,\n validate_shape=True,\n use_resource=None,\n custom_getter=None,\n constraint=None):\n return get_variable_scope().get_variable(\n _get_default_variable_store(), name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer, trainable=trainable,\n collections=collections, caching_device=caching_device,\n partitioner=partitioner, validate_shape=validate_shape,\n use_resource=use_resource, custom_getter=custom_getter,\n constraint=constraint)\nget_variable_or_local_docstring = (\n \"\"\"%s\n\n%sThis function prefixes the name with the current variable scope\nand performs reuse checks. See the\n@{$variables$Variable Scope How To}\nfor an extensive description of how reusing works. Here is a basic example:\n\n```python\ndef foo():\n with tf.variable_scope(\"foo\", reuse=tf.AUTO_REUSE):\n v = tf.get_variable(\"v\", [1])\n return v\n\nv1 = foo() # Creates v.\nv2 = foo() # Gets the same, existing v.\nassert v1 == v2\n```\n\nIf initializer is `None` (the default), the default initializer passed in\nthe variable scope will be used. If that one is `None` too, a\n`glorot_uniform_initializer` will be used. 
The initializer can also be\na Tensor, in which case the variable is initialized to this value and shape.\n\nSimilarly, if the regularizer is `None` (the default), the default regularizer\npassed in the variable scope will be used (if that is `None` too,\nthen by default no regularization is performed).\n\nIf a partitioner is provided, a `PartitionedVariable` is returned.\nAccessing this object as a `Tensor` returns the shards concatenated along\nthe partition axis.\n\nSome useful partitioners are available. See, e.g.,\n`variable_axis_size_partitioner` and `min_max_variable_partitioner`.\n\nArgs:\n name: The name of the new or existing variable.\n shape: Shape of the new or existing variable.\n dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).\n initializer: Initializer for the variable if one is created.\n regularizer: A (Tensor -> Tensor or None) function; the result of\n applying it on a newly created variable will be added to the collection\n @{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.\n %scollections: List of graph collections keys to add the Variable to.\n Defaults to `[%s]` (see `tf.Variable`).\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n partitioner: Optional callable that accepts a fully defined `TensorShape`\n and `dtype` of the Variable to be created, and returns a list of\n partitions for each axis (currently only one axis can be partitioned).\n validate_shape: If False, allows the variable to be initialized with a\n value of unknown shape. If True, the default, the shape of initial_value\n must be known.\n use_resource: If False, creates a regular Variable. If true, creates an\n experimental ResourceVariable instead with well-defined semantics.\n Defaults to False (will later change to True). When eager execution is\n enabled this argument is always forced to be True.\n custom_getter: Callable that takes as a first argument the true getter, and\n allows overwriting the internal get_variable method.\n The signature of `custom_getter` should match that of this method,\n but the most future-proof version will allow for changes:\n `def custom_getter(getter, *args, **kwargs)`. Direct access to\n all `get_variable` parameters is also allowed:\n `def custom_getter(getter, name, *args, **kwargs)`. A simple identity\n custom getter that simply creates variables with modified names is:\n ```python\n def custom_getter(getter, name, *args, **kwargs):\n return getter(name + '_suffix', *args, **kwargs)\n ```\n\nReturns:\n The created or existing `Variable` (or `PartitionedVariable`, if a\n partitioner was used).\n\nRaises:\n ValueError: when creating a new variable and shape is not declared,\n when violating reuse during variable creation, or when `initializer` dtype\n and `dtype` don't match. 
Reuse is set inside `variable_scope`.\n\"\"\")\nget_variable.__doc__ = get_variable_or_local_docstring % (\n \"Gets an existing variable with these parameters or create a new one.\",\n \"\",\n \"trainable: If `True` also add the variable to the graph collection\\n\"\n \" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\\n \",\n \"GraphKeys.GLOBAL_VARIABLES\")\n\n\[email protected](get_variable)\ndef get_local_variable(*args, **kwargs):\n kwargs[\"trainable\"] = False\n if \"collections\" in kwargs:\n kwargs[\"collections\"] += [ops.GraphKeys.LOCAL_VARIABLES]\n else:\n kwargs[\"collections\"] = [ops.GraphKeys.LOCAL_VARIABLES]\n return get_variable(*args, **kwargs)\nget_local_variable.__doc__ = get_variable_or_local_docstring % (\n \"Gets an existing *local* variable or creates a new one.\",\n \"Behavior is the same as in `get_variable`, except that variables are\\n\"\n \"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\\n\"\n \"`False`.\\n\",\n \"\",\n \"GraphKeys.LOCAL_VARIABLES\")\n\n\ndef _get_partitioned_variable(name,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=True,\n collections=None,\n caching_device=None,\n partitioner=None,\n validate_shape=True,\n use_resource=None,\n constraint=None):\n \"\"\"Gets or creates a sharded variable list with these parameters.\n\n The `partitioner` must be a callable that accepts a fully defined\n `TensorShape` and returns a sequence of integers (the `partitions`).\n These integers describe how to partition the given sharded `Variable`\n along the given dimension. That is, `partitions[1] = 3` means split\n the `Variable` into 3 shards along dimension 1. Currently, sharding along\n only one axis is supported.\n\n If the list of variables with the given name (prefix) is already stored,\n we return the stored variables. Otherwise, we create a new one.\n\n If initializer is `None` (the default), the default initializer passed in\n the constructor is used. If that one is `None` too, we use a new\n `glorot_uniform_initializer`. If initializer is a Tensor, we use\n it as a value and derive the shape from the initializer.\n\n If the initializer is a callable, then it will be called for each\n shard. Otherwise the initializer should match the shape of the entire\n sharded Variable, and it will be sliced accordingly for each shard.\n\n Some useful partitioners are available. See, e.g.,\n `variable_axis_size_partitioner` and `min_max_variable_partitioner`.\n\n Args:\n name: The name of the new or existing variable.\n shape: Shape of the new or existing variable.\n dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).\n initializer: Initializer for the variable if one is created.\n regularizer: A (Tensor -> Tensor or None) function; the result of\n applying it on a newly created variable will be added to the collection\n GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n collections: List of graph collections keys to add the Variable to.\n Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. 
Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n partitioner: Optional callable that accepts a fully defined `TensorShape`\n and `dtype` of the Variable to be created, and returns a list of\n partitions for each axis (currently only one axis can be partitioned).\n validate_shape: If False, allows the variable to be initialized with a\n value of unknown shape. If True, the default, the shape of initial_value\n must be known.\n use_resource: If False, creates a regular Variable. If True, creates an\n experimental ResourceVariable instead which has well-defined semantics.\n Defaults to False (will later change to True).\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Returns:\n A tuple `(shards, partitions)` where `shards` is the list of `Variable`\n shards and `partitions` is the output of the partitioner on the input\n shape.\n\n Raises:\n ValueError: when creating a new variable and shape is not declared,\n or when violating reuse during variable creation. Reuse is set inside\n `variable_scope`.\n \"\"\"\n # pylint: disable=protected-access\n scope = get_variable_scope()\n if scope.custom_getter is not None:\n raise ValueError(\n \"Private access to _get_partitioned_variable is not allowed when \"\n \"a custom getter is set. Current custom getter: %s. \"\n \"It is likely that you're using create_partitioned_variables. 
\"\n \"If so, consider instead using get_variable with a non-empty \"\n \"partitioner parameter instead.\" % scope.custom_getter)\n return scope._get_partitioned_variable(\n _get_default_variable_store(), name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer, trainable=trainable,\n collections=collections, caching_device=caching_device,\n partitioner=partitioner, validate_shape=validate_shape,\n use_resource=use_resource, constraint=constraint)\n # pylint: enable=protected-access\n\n\n# Named like a function for compatibility with the previous\n# @tf_contextlib.contextmanager definition.\nclass _pure_variable_scope(object): # pylint: disable=invalid-name\n \"\"\"A context for the variable_scope, see `variable_scope` for docs.\"\"\"\n\n def __init__(self,\n name_or_scope,\n reuse=None,\n initializer=None,\n regularizer=None,\n caching_device=None,\n partitioner=None,\n custom_getter=None,\n old_name_scope=None,\n dtype=dtypes.float32,\n use_resource=None,\n constraint=None):\n \"\"\"Creates a context for the variable_scope, see `variable_scope` for docs.\n\n Note: this does not create a name scope.\n\n Args:\n name_or_scope: `string` or `VariableScope`: the scope to open.\n reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent\n scope's reuse flag.\n initializer: default initializer for variables within this scope.\n regularizer: default regularizer for variables within this scope.\n caching_device: default caching device for variables within this scope.\n partitioner: default partitioner for variables within this scope.\n custom_getter: default custom getter for variables within this scope.\n old_name_scope: the original name scope when re-entering a variable scope.\n dtype: type of the variables within this scope (defaults to `DT_FLOAT`).\n use_resource: If False, variables in this scope will be regular Variables.\n If True, experimental ResourceVariables will be creates instead, with\n well-defined semantics. Defaults to False (will later change to True).\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n \"\"\"\n self._name_or_scope = name_or_scope\n self._reuse = reuse\n self._initializer = initializer\n self._regularizer = regularizer\n self._caching_device = caching_device\n self._partitioner = partitioner\n self._custom_getter = custom_getter\n self._old_name_scope = old_name_scope\n self._dtype = dtype\n self._use_resource = use_resource\n self._constraint = constraint\n get_variable_scope() # Ensure that a default exists, then get a pointer.\n # Get the reference to the collection as we want to modify it in place.\n self._default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)\n self._var_store = _get_default_variable_store()\n if isinstance(self._name_or_scope, VariableScope):\n self._new_name = self._name_or_scope.name\n name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access\n # Handler for the case when we jump to a shared scope. 
We create a new\n # VariableScope (self._var_scope_object) that contains a copy of the\n # provided shared scope, possibly with changed reuse and initializer, if\n # the user requested this.\n variable_scope_object = VariableScope(\n self._name_or_scope.reuse if not self._reuse else self._reuse,\n name=self._new_name,\n initializer=self._name_or_scope.initializer,\n regularizer=self._name_or_scope.regularizer,\n caching_device=self._name_or_scope.caching_device,\n partitioner=self._name_or_scope.partitioner,\n dtype=self._name_or_scope.dtype,\n custom_getter=self._name_or_scope.custom_getter,\n name_scope=name_scope,\n use_resource=self._name_or_scope.use_resource,\n constraint=self._constraint)\n if self._initializer is not None:\n variable_scope_object.set_initializer(self._initializer)\n if self._regularizer is not None:\n variable_scope_object.set_regularizer(self._regularizer)\n if self._caching_device is not None:\n variable_scope_object.set_caching_device(self._caching_device)\n if self._partitioner is not None:\n variable_scope_object.set_partitioner(self._partitioner)\n if self._custom_getter is not None:\n variable_scope_object.set_custom_getter(\n _maybe_wrap_custom_getter(\n self._custom_getter, self._name_or_scope.custom_getter))\n if self._dtype is not None:\n variable_scope_object.set_dtype(self._dtype)\n if self._use_resource is not None:\n variable_scope_object.set_use_resource(self._use_resource)\n self._cached_variable_scope_object = variable_scope_object\n\n def __enter__(self):\n \"\"\"Begins the scope block.\n\n Returns:\n A VariableScope.\n Raises:\n ValueError: when trying to reuse within a create scope, or create within\n a reuse scope, or if reuse is not `None` or `True`.\n TypeError: when the types of some arguments are not appropriate.\n \"\"\"\n self._old = self._default_varscope[0]\n if isinstance(self._name_or_scope, VariableScope):\n self._var_store.open_variable_scope(self._new_name)\n self._old_subscopes = copy.copy(self._var_store.variable_scopes_count)\n variable_scope_object = self._cached_variable_scope_object\n else:\n # Handler for the case when we just prolong current variable scope.\n # VariableScope with name extended by the provided one, and inherited\n # reuse and initializer (except if the user provided values to set).\n self._new_name = (\n self._old.name + \"/\" + self._name_or_scope if self._old.name\n else self._name_or_scope)\n self._reuse = (self._reuse\n or self._old.reuse) # Re-using is inherited by sub-scopes.\n variable_scope_object = VariableScope(\n self._reuse,\n name=self._new_name,\n initializer=self._old.initializer,\n regularizer=self._old.regularizer,\n caching_device=self._old.caching_device,\n partitioner=self._old.partitioner,\n dtype=self._old.dtype,\n use_resource=self._old.use_resource,\n custom_getter=self._old.custom_getter,\n name_scope=self._old_name_scope or self._name_or_scope,\n constraint=self._constraint)\n if self._initializer is not None:\n variable_scope_object.set_initializer(self._initializer)\n if self._regularizer is not None:\n variable_scope_object.set_regularizer(self._regularizer)\n if self._caching_device is not None:\n variable_scope_object.set_caching_device(self._caching_device)\n if self._partitioner is not None:\n variable_scope_object.set_partitioner(self._partitioner)\n if self._custom_getter is not None:\n variable_scope_object.set_custom_getter(\n _maybe_wrap_custom_getter(self._custom_getter,\n self._old.custom_getter))\n if self._dtype is not None:\n 
variable_scope_object.set_dtype(self._dtype)\n if self._use_resource is not None:\n variable_scope_object.set_use_resource(self._use_resource)\n self._var_store.open_variable_scope(self._new_name)\n self._default_varscope[0] = variable_scope_object\n return variable_scope_object\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n # If jumping out from a non-prolonged scope, restore counts.\n if isinstance(self._name_or_scope, VariableScope):\n self._var_store.variable_scopes_count = self._old_subscopes\n else:\n self._var_store.close_variable_subscopes(self._new_name)\n self._default_varscope[0] = self._old\n\n\ndef _maybe_wrap_custom_getter(custom_getter, old_getter):\n \"\"\"Wrap a call to a custom_getter to use the old_getter internally.\"\"\"\n if old_getter is None:\n return custom_getter\n\n # The new custom_getter should call the old one\n def wrapped_custom_getter(getter, *args, **kwargs):\n # Call:\n # custom_getter(\n # lambda: old_getter(true_getter, ...), *args, **kwargs)\n # which means custom_getter will call old_getter, which\n # will call the true_getter, perform any intermediate\n # processing, and return the results to the current\n # getter, which will also perform additional processing.\n return custom_getter(\n functools.partial(old_getter, getter),\n *args, **kwargs)\n return wrapped_custom_getter\n\n\ndef _get_unique_variable_scope(prefix):\n \"\"\"Get a name with the given prefix unique in the current variable scope.\"\"\"\n var_store = _get_default_variable_store()\n current_scope = get_variable_scope()\n name = current_scope.name + \"/\" + prefix if current_scope.name else prefix\n if var_store.variable_scope_count(name) == 0:\n return prefix\n idx = 1\n while var_store.variable_scope_count(name + (\"_%d\" % idx)) > 0:\n idx += 1\n return prefix + (\"_%d\" % idx)\n\n\n# Named like a function for backwards compatibility with the\n# @tf_contextlib.contextmanager version, which was switched to a class to avoid\n# some object creation overhead.\nclass variable_scope(object): # pylint: disable=invalid-name\n \"\"\"A context manager for defining ops that creates variables (layers).\n\n This context manager validates that the (optional) `values` are from the same\n graph, ensures that graph is the default graph, and pushes a name scope and a\n variable scope.\n\n If `name_or_scope` is not None, it is used as is. If `scope` is None, then\n `default_name` is used. In that case, if the same name has been previously\n used in the same scope, it will be made unique by appending `_N` to it.\n\n Variable scope allows you to create new variables and to share already created\n ones while providing checks to not create or share by accident. 
For details,\n see the @{$variables$Variable Scope How To}, here we present only a few basic\n examples.\n\n Simple example of how to create a new variable:\n\n ```python\n with tf.variable_scope(\"foo\"):\n with tf.variable_scope(\"bar\"):\n v = tf.get_variable(\"v\", [1])\n assert v.name == \"foo/bar/v:0\"\n ```\n\n Basic example of sharing a variable AUTO_REUSE:\n\n ```python\n def foo():\n with tf.variable_scope(\"foo\", reuse=tf.AUTO_REUSE):\n v = tf.get_variable(\"v\", [1])\n return v\n\n v1 = foo() # Creates v.\n v2 = foo() # Gets the same, existing v.\n assert v1 == v2\n\n\n Basic example of sharing a variable with reuse=True:\n\n ```python\n with tf.variable_scope(\"foo\"):\n v = tf.get_variable(\"v\", [1])\n with tf.variable_scope(\"foo\", reuse=True):\n v1 = tf.get_variable(\"v\", [1])\n assert v1 == v\n ```\n\n Sharing a variable by capturing a scope and setting reuse:\n\n ```python\n with tf.variable_scope(\"foo\") as scope:\n v = tf.get_variable(\"v\", [1])\n scope.reuse_variables()\n v1 = tf.get_variable(\"v\", [1])\n assert v1 == v\n ```\n\n To prevent accidental sharing of variables, we raise an exception when getting\n an existing variable in a non-reusing scope.\n\n ```python\n with tf.variable_scope(\"foo\"):\n v = tf.get_variable(\"v\", [1])\n v1 = tf.get_variable(\"v\", [1])\n # Raises ValueError(\"... v already exists ...\").\n ```\n\n Similarly, we raise an exception when trying to get a variable that does not\n exist in reuse mode.\n\n ```python\n with tf.variable_scope(\"foo\", reuse=True):\n v = tf.get_variable(\"v\", [1])\n # Raises ValueError(\"... v does not exists ...\").\n ```\n\n Note that the `reuse` flag is inherited: if we open a reusing scope, then all\n its sub-scopes become reusing as well.\n\n A note about name scoping: Setting `reuse` does not impact the naming of other\n ops such as mult. See related discussion on\n [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)\n\n Note that up to and including version 1.0, it was allowed (though explicitly\n discouraged) to pass False to the reuse argument, yielding undocumented\n behaviour slightly different from None. Starting at 1.1.0 passing None and\n False as reuse has exactly the same effect.\n \"\"\"\n\n def __init__(self,\n name_or_scope,\n default_name=None,\n values=None,\n initializer=None,\n regularizer=None,\n caching_device=None,\n partitioner=None,\n custom_getter=None,\n reuse=None,\n dtype=None,\n use_resource=None,\n constraint=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name_or_scope: `string` or `VariableScope`: the scope to open.\n default_name: The default name to use if the `name_or_scope` argument is\n `None`, this name will be uniquified. If name_or_scope is provided it\n won't be used and therefore it is not required and can be None.\n values: The list of `Tensor` arguments that are passed to the op function.\n initializer: default initializer for variables within this scope.\n regularizer: default regularizer for variables within this scope.\n caching_device: default caching device for variables within this scope.\n partitioner: default partitioner for variables within this scope.\n custom_getter: default custom getter for variables within this scope.\n reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode\n for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create\n variables if they do not exist, and return them otherwise; if None, we\n inherit the parent scope's reuse flag. 
When eager execution is enabled,\n this argument is always forced to be tf.AUTO_REUSE.\n dtype: type of variables created in this scope (defaults to the type\n in the passed scope, or inherited from parent scope).\n use_resource: If False, all variables will be regular Variables. If True,\n experimental ResourceVariables with well-defined semantics will be used\n instead. Defaults to False (will later change to True). When eager\n execution is enabled this argument is always forced to be True.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Returns:\n A scope that can be captured and reused.\n\n Raises:\n ValueError: when trying to reuse within a create scope, or create within\n a reuse scope.\n TypeError: when the types of some arguments are not appropriate.\n \"\"\"\n self._name_or_scope = name_or_scope\n self._default_name = default_name\n self._values = values\n self._initializer = initializer\n self._regularizer = regularizer\n self._caching_device = caching_device\n self._partitioner = partitioner\n self._custom_getter = custom_getter\n self._reuse = reuse\n self._dtype = dtype\n self._use_resource = use_resource\n self._constraint = constraint\n if self._default_name is None and self._name_or_scope is None:\n raise TypeError(\"If default_name is None then name_or_scope is required\")\n if self._reuse is False:\n # We don't allow non-inheriting scopes, False = None here.\n self._reuse = None\n if not (self._reuse is True\n or self._reuse is None\n or self._reuse is AUTO_REUSE):\n raise ValueError(\"The reuse parameter must be True or False or None.\")\n if self._values is None:\n self._values = []\n self._in_graph_mode = not context.in_eager_mode()\n if self._in_graph_mode:\n self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access\n self._cached_pure_variable_scope = None\n self._current_name_scope = None\n\n def __enter__(self):\n if self._in_graph_mode:\n self._graph_context_manager = self._graph.as_default()\n self._graph_context_manager.__enter__()\n if self._cached_pure_variable_scope is not None:\n # Fast path for re-entering variable_scopes. 
We've held on to the pure\n # variable scope from a previous __enter__, so we avoid some overhead by\n # re-using that object.\n if self._current_name_scope is not None:\n self._current_name_scope.__enter__()\n return self._cached_pure_variable_scope.__enter__()\n if self._name_or_scope is not None:\n if not isinstance(self._name_or_scope,\n (VariableScope,) + six.string_types):\n raise TypeError(\"VariableScope: name_or_scope must be a string or \"\n \"VariableScope.\")\n if isinstance(self._name_or_scope, six.string_types):\n name_scope = self._name_or_scope\n else:\n name_scope = self._name_or_scope.name.split(\"/\")[-1]\n if name_scope:\n self._current_name_scope = ops.name_scope(name_scope)\n current_name_scope_name = self._current_name_scope.__enter__()\n if isinstance(self._name_or_scope, six.string_types):\n old_name_scope = current_name_scope_name\n else:\n old_name_scope = self._name_or_scope.original_name_scope\n self._cached_pure_variable_scope = _pure_variable_scope(\n self._name_or_scope,\n reuse=self._reuse,\n initializer=self._initializer,\n regularizer=self._regularizer,\n caching_device=self._caching_device,\n partitioner=self._partitioner,\n custom_getter=self._custom_getter,\n old_name_scope=old_name_scope,\n dtype=self._dtype,\n use_resource=self._use_resource,\n constraint=self._constraint)\n return self._cached_pure_variable_scope.__enter__()\n else:\n self._current_name_scope = None\n # This can only happen if someone is entering the root variable scope.\n self._cached_pure_variable_scope = _pure_variable_scope(\n self._name_or_scope,\n reuse=self._reuse,\n initializer=self._initializer,\n regularizer=self._regularizer,\n caching_device=self._caching_device,\n partitioner=self._partitioner,\n custom_getter=self._custom_getter,\n dtype=self._dtype,\n use_resource=self._use_resource,\n constraint=self._constraint)\n return self._cached_pure_variable_scope.__enter__()\n\n else: # Here name_or_scope is None. 
Using default name, but made unique.\n if self._reuse:\n raise ValueError(\"reuse=True cannot be used without a name_or_scope\")\n self._current_name_scope = ops.name_scope(self._default_name)\n current_name_scope_name = self._current_name_scope.__enter__()\n unique_default_name = _get_unique_variable_scope(self._default_name)\n self._cached_pure_variable_scope = _pure_variable_scope(\n unique_default_name,\n initializer=self._initializer,\n regularizer=self._regularizer,\n caching_device=self._caching_device,\n partitioner=self._partitioner,\n custom_getter=self._custom_getter,\n old_name_scope=current_name_scope_name,\n dtype=self._dtype,\n use_resource=self._use_resource,\n constraint=self._constraint)\n return self._cached_pure_variable_scope.__enter__()\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n self._cached_pure_variable_scope.__exit__(\n type_arg, value_arg, traceback_arg)\n if self._current_name_scope:\n self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)\n if self._in_graph_mode:\n self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)\n\n\n# pylint: disable=g-doc-return-or-yield\n@tf_contextlib.contextmanager\ndef variable_op_scope(values,\n name_or_scope,\n default_name=None,\n initializer=None,\n regularizer=None,\n caching_device=None,\n partitioner=None,\n custom_getter=None,\n reuse=None,\n dtype=None,\n use_resource=None,\n constraint=None):\n \"\"\"Deprecated: context manager for defining an op that creates variables.\"\"\"\n logging.warn(\"tf.variable_op_scope(values, name, default_name) is deprecated,\"\n \" use tf.variable_scope(name, default_name, values)\")\n with variable_scope(name_or_scope,\n default_name=default_name,\n values=values,\n initializer=initializer,\n regularizer=regularizer,\n caching_device=caching_device,\n partitioner=partitioner,\n custom_getter=custom_getter,\n reuse=reuse,\n dtype=dtype,\n use_resource=use_resource,\n constraint=constraint) as scope:\n yield scope\n\n\ndef _compute_slice_dim_and_shape(full_shape, slicing):\n \"\"\"Computes which dimension is being sliced and the typical slice shape.\"\"\"\n\n slice_shape = [0] * len(full_shape)\n slice_dim = None\n for dim, num_slices in enumerate(slicing):\n dim_size = full_shape[dim]\n if num_slices <= 0 or dim_size < num_slices:\n raise ValueError(\"Cannot create %d slices for size %d. 
shape: %s, \"\n \"slicing: %s\" %\n (num_slices, full_shape[dim], full_shape, slicing))\n if num_slices == 1:\n # Not slicing in this dimension.\n slice_shape[dim] = dim_size\n elif slice_dim is not None:\n # We only support slicing along one of the dimensions.\n raise ValueError(\"Can only slice a variable along one dimension: \"\n \"shape: %s, slicing: %s\" % (full_shape, slicing))\n else:\n # Note: We will add any extras onto the last slice, later.\n slice_dim = dim\n slice_shape[dim] = dim_size // num_slices\n\n # Degenerate case: If \"slicing\" was all ones, pretend we are slicing along\n # the first dimension.\n if slice_dim is None:\n slice_dim = 0\n return slice_dim, slice_shape\n\n\ndef variable(initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n dtype=None):\n use_resource = get_variable_scope().use_resource\n if use_resource or (use_resource is None and context.in_eager_mode()):\n return resource_variable_ops.ResourceVariable(\n initial_value=initial_value, trainable=trainable,\n collections=collections, validate_shape=validate_shape,\n caching_device=caching_device, name=name, dtype=dtype)\n elif not use_resource and context.in_eager_mode():\n raise RuntimeError(\n \"VariableScope should use resource variable when eager execution is\"\n \" enabled, but use_resource is False.\"\n )\n else:\n return variables.Variable(\n initial_value=initial_value, trainable=trainable,\n collections=collections, validate_shape=validate_shape,\n caching_device=caching_device, name=name, dtype=dtype)\n" ]
[ [ "tensorflow.python.eager.context.in_eager_mode", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.ops.get_collection_ref", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.platform.tf_logging.vlog", "tensorflow.python.estimator.util.fn_args", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.variables.PartitionedVariable", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.eager.context.in_graph_mode", "tensorflow.python.ops.init_ops.glorot_uniform_initializer", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.framework.ops._get_graph_from_inputs", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.tensor_shape.as_shape" ] ]
darylbond/cerberus
[ "a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a", "a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a" ]
[ "Exec/testing/Orszag-Tang/movie.py", "Exec/testing/Viscous-Vortex/check.py" ]
[ "\nimport sys # nopep8\ncmd_folder = \"../../../vis\" # nopep8\nif cmd_folder not in sys.path: # nopep8\n sys.path.insert(0, cmd_folder)\n\nfrom tile_mov import tile_movie\nfrom make_mov import make_all, get_particle_trajectories\nimport numpy as np\nimport pylab as plt\n\n\n# ==============================================================================\n# MAKE MOVIES\n# ==============================================================================\n\ndef get_velocity_magnitude(ds, c):\n x, u = ds.get(\"x_vel-mhd\")\n x, v = ds.get(\"y_vel-mhd\", grid='node')\n\n return {\"x\":x[0], \"y\":x[1], \"value\":np.sqrt(u**2 + v**2)}\n\n\ndef get_particles(ds, c):\n idat, rdat = ds.get_particles('mhd')\n return {\"i\":idat, \"r\":rdat}\n\ndef plot(frame, data, output_name):\n\n dat = data[\"velocity\"]\n xn = dat[\"x\"]\n yn = dat[\"y\"]\n vel = dat[\"value\"]\n vmin = frame[\"velocity\"][\"min\"]\n vmax = frame[\"velocity\"][\"max\"]\n\n limits = frame[\"q\"][\"xy_limits\"]\n\n # particles\n px, py, pi = get_particle_trajectories(data[\"particles\"], limits)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n yn, xn = np.meshgrid(yn, xn)\n\n ax.pcolormesh(xn, yn, vel, vmin=vmin, vmax=vmax)\n\n l = 10 #streak length\n ax.plot(px[-l::], py[-l::], \"k-\", lw=0.5, alpha=0.5)\n ax.plot(px[-1], py[-1], \"ko\", ms=0.5, alpha=0.5)\n\n ax.text(0.05, 0.05, r'$\\left| \\vec{u} \\right|$', horizontalalignment='left', \n verticalalignment='bottom', transform=ax.transAxes, fontsize=18)\n\n ax.set_xlim(limits[0][0], limits[1][0])\n ax.set_ylim(limits[0][1], limits[1][1])\n\n ax.set_aspect(1)\n ax.axes.xaxis.set_visible(False)\n ax.axes.yaxis.set_visible(False)\n fig.tight_layout()\n fig.savefig(output_name, dpi=300, bbox_inches=\"tight\")\n plt.close(fig)\n\n return\n\n\n\ndt = 0.005\n\nQ = []\n\nq = {}\nq[\"files_dir\"] = \".\"\nq[\"level\"] = -1\n\n# all the data we need to retrieve\nq[\"get\"] = [\n {\"func\":get_velocity_magnitude, \"tag\":\"velocity\"},\n {\"func\":get_particles, \"tag\":\"particles\", \"get_streak\":True},\n]\n\n# how to make a frame\nq[\"plot\"] = plot\nq[\"name\"] = \"movie\"\n\n##\nq[\"framerate\"] = 20\nq[\"mov_save\"] = q[\"files_dir\"] + \"/mov\"\nq[\"offset\"] = [0.0, 0.0]\nq[\"xy_limits\"] = [[0.0, 0.0], [1.0, 1.0]]\nq[\"file_include\"] = [\"plt\"]\nq[\"file_exclude\"] = [\"chk\"]\nq[\"cores\"] = 4\nq[\"time_span\"] = [] #np.arange(0,0.1,0.005).tolist()\nq[\"force_data\"] = True\nq[\"force_frames\"] = True\nq[\"only_frames\"] = False\nq[\"redo_streaks\"] = False\nq[\"dpi\"] = 300\n\nq[\"normalize\"] = \"all\"\n\nQ.append(q)\n\nmake_all(Q)\n\nprint(\"DONE\")\n", "\nimport sys\ncmd_folder = \"../../../vis\"\nif cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n \nfrom get_boxlib import ReadBoxLib, get_files\n\nimport numpy as np\nimport pylab as plt\nimport matplotlib.ticker as ticker\n\ndef check():\n\n #==============================================================================\n # Simulation results\n #==============================================================================\n\n # get a list of all the files in this directory\n files = get_files('.', include=['plt'], exclude=[\"temp\"], get_all=True)\n\n f = files[-1]\n\n data = ReadBoxLib(f)\n t = data.time\n \n data = ReadBoxLib(f, max_level=-1)\n\n xc, u = data.get(\"x_vel-air\")\n xc, v = data.get(\"y_vel-air\")\n\n vel = np.sqrt(u**2 + v**2)\n\n yc, xc = np.meshgrid(xc[1], xc[0])\n\n R = np.sqrt(xc**2 + yc**2)\n\n R_linear = np.ravel(R)\n vel_linear = np.ravel(vel)\n\n r_max = 8.0\n R_linear = 
np.ma.masked_where(R_linear>r_max, R_linear)\n vel_linear = np.ma.masked_where(R_linear>r_max, vel_linear)\n\n I = np.argsort(R_linear)\n R_linear = R_linear[I]\n vel_linear = vel_linear[I]\n\n # =============================================================================\n # analytical solution\n # =============================================================================\n\n # D. J. Munoz, V. Springel, R. Marcus, M. Vogelsberger, L. Hernquist, \n # Multidimensional, compressible viscous flow on a moving Voronoi mesh, \n # Monthly Notices of the Royal Astronomical Society, \n # Volume 428, Issue 1, 1 January 2013, Pages 254-279, \n # https://doi.org/10.1093/mnras/sts015\n\n G = 1.0\n mu0 = 0.08\n rho0 = 1.0\n nu = mu0/rho0\n t0 = 10.0\n def vtheta(R,t):\n return G/(2*np.pi*R)*(1-np.exp(-R**2/(4*nu*t)))\n\n vt = vtheta(R_linear, data.time+t0)\n\n # =============================================================================\n # check\n # =============================================================================\n\n success = 0\n\n rel_err = np.abs((vel_linear - vt)/vt)\n\n if np.max(rel_err) > 0.01:\n success = 1\n \n # =============================================================================\n # plot\n # =============================================================================\n\n plt.rc(\"font\", family=\"serif\")\n plt.rc(\"font\", size=8)\n plt.rc(\"mathtext\", fontset=\"cm\")\n # matplotlib.rc('text', usetex = True)\n params= {'text.latex.preamble' : [r'\\usepackage{amsmath}']}\n plt.rcParams.update(params)\n\n fig = plt.figure(figsize=(5,2))\n\n ax = fig.add_subplot(111)\n ax.plot(R_linear, vel_linear,'.', ms=2, mfc='none')\n ax.plot(R_linear, vt, 'k--', lw=1)\n ax.set_xlabel(r\"$r$\")\n ax.set_ylabel(r\"$v_\\theta$\")\n\n ax = ax.twinx()\n\n ax.plot(R_linear, rel_err*1000, 'r.', ms=0.5)\n ax.set_ylabel(r'$\\left| \\frac{\\hat{v}_\\theta - v_\\theta}{v_\\theta} \\right|\\times 10^3$')\n\n ax.set_xlim(0,8)\n\n ylim = ax.get_ylim()\n ax.set_ylim(0, ylim[1])\n\n fig.tight_layout()\n fig.savefig(\"plot.pdf\", dpi=300)\n\n return success\n \n\nif __name__ == \"__main__\":\n sys.exit(check())" ]
[ [ "numpy.meshgrid", "numpy.sqrt" ], [ "numpy.sqrt", "numpy.meshgrid", "numpy.abs", "numpy.exp", "numpy.max", "numpy.argsort", "numpy.ravel", "numpy.ma.masked_where" ] ]
Farazist/farazist-raspberrypi-app
[ "db497d68b2b206f0a5a5e6a9d88c464445179f8d" ]
[ "main_offline.py" ]
[ "from io import BytesIO\nimport os\nimport sys\nimport qrcode\nfrom pygame import mixer\nfrom time import sleep, time\nfrom threading import Thread, Timer, Event\nfrom functools import partial\nfrom escpos.printer import Usb\nfrom gpiozero import DistanceSensor\nfrom gpiozero.pins.native import NativeFactory\nfrom PySide2.QtUiTools import QUiLoader\nfrom PySide2.QtCore import Qt, QTimer, QDate, QTime, QSize, QThread, Signal\nfrom PySide2.QtGui import QMovie, QPixmap, QFont, QIcon\nfrom PySide2.QtWidgets import QApplication, QWidget, QSizePolicy, QPushButton, QVBoxLayout, QGridLayout, QLabel\nfrom PIL.ImageQt import ImageQt\nfrom scipy import stats\nfrom mfrc522 import SimpleMFRC522\nimport picamera\n\nfrom utils.motor import Motor\nfrom utils.server import Server\nfrom utils.database import DataBase\nfrom utils.custombutton import CustomButton\nfrom utils.image_classifier import ImageClassifier\nfrom utils.error_log import ErrorLog\nfrom utils.messages import *\n\n__author__ = \"Sara Zarei, Sajjad Aemmi\"\n__copyright__ = \"Copyright 2020\"\n__license__ = \"MIT\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nDEVICE_VERSION = 'ورژن {}'\nBTN_PASS_RECOVERY_STYLE = 'font: 18pt \"IRANSans\";color: rgb(121, 121, 121);border: none; outline-style: none;'\n\nstack_timer = 240000\ndelivery_cancel_time = 20.0\ncamera_time = 3\n \n\nclass AutoDeliveryItemsThread(QThread):\n\n def __init__(self):\n QThread.__init__(self)\n \n def run(self):\n self.predicted_items = []\n try:\n with picamera.PiCamera(resolution=(1280, 720), framerate=30) as camera:\n # camera.start_preview()\n stream = BytesIO()\n for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):\n if window.delivery_state in ['recognize', 'enter']:\n print('capturing...')\n stream.seek(0)\n label, score = window.image_classifier(stream)\n if score > window.predict_item_threshold:\n self.predicted_items.append(label)\n print(label, score)\n stream.seek(0)\n stream.truncate()\n else:\n # camera.stop_preview()\n break\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In AutoDeliveryItemsThread') \n\n\nclass RFIDThread(QThread):\n success_signal = Signal()\n fail_signal = Signal()\n \n def __init__(self):\n QThread.__init__(self)\n \n def run(self):\n try:\n print(\"Now place your tag to write\")\n id, old_data = window.rfid_sensor.read()\n new_data = window.ui.lbl_transfer_to_rfid.text()\n \n if old_data.isdigit():\n data = int(new_data) + int(old_data)\n else:\n data = int(new_data)\n window.rfid_sensor.write(str(data))\n print(\"Written\")\n self.success_signal.emit()\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In transferToRFIDCard Method')\n self.fail_signal.emit()\n\n\nclass MainWindow(QWidget):\n \n def __init__(self):\n QWidget.__init__(self)\n # super(MainWindow, self).__init__()\n \n self.system_id = DataBase.select('system_id')\n self.device_version = DataBase.select('app_version')\n self.device_mode = DataBase.select('device_mode')\n \n loader = QUiLoader()\n self.ui = loader.load('main.ui', None)\n\n sp_retain = QSizePolicy()\n sp_retain.setRetainSizeWhenHidden(True)\n self.ui.btn_left.setSizePolicy(sp_retain)\n self.ui.btn_right.setSizePolicy(sp_retain) \n self.ui.lbl_device_info.setSizePolicy(sp_retain)\n self.ui.btn_setting.setSizePolicy(sp_retain)\n\n self.btnOwnerLogin = CustomButton()\n self.btnOwnerLogin.setGif(\"animations/Rolling-white.gif\")\n self.ui.vLayoutSignInOwner.addWidget(self.btnOwnerLogin)\n 
self.ui.vLayoutSignInOwner.setAlignment(Qt.AlignHCenter)\n\n self.btnUserLoginID = CustomButton()\n self.btnUserLoginID.setGif(\"animations/Rolling-white.gif\")\n self.lbl = QLabel(None)\n self.lbl.setStyleSheet(BTN_PASS_RECOVERY_STYLE)\n self.ui.vLayoutSignInUser.addWidget(self.btnUserLoginID)\n self.ui.vLayoutSignInUser.addWidget(self.lbl)\n self.ui.vLayoutSignInUser.setAlignment(Qt.AlignHCenter)\n\n self.btnUserLoginMobile = CustomButton()\n self.btnUserLoginMobile.setGif(\"animations/Rolling-white.gif\")\n self.lbl = QLabel(None)\n self.lbl.setStyleSheet(BTN_PASS_RECOVERY_STYLE)\n\n # Threads\n self.auto_delivery_items_thread = AutoDeliveryItemsThread()\n\n self.rfid_thread = RFIDThread()\n self.rfid_thread.success_signal.connect(self.successTransferToRFIDCard)\n self.rfid_thread.fail_signal.connect(self.transferToRFIDCard)\n\n # signals\n self.ui.btn_refresh_loading.clicked.connect(self.refresh)\n self.ui.btn_main_menu_1.clicked.connect(self.checkDeviceMode)\n self.ui.btn_start.hide()\n #self.ui.btn_main_menu_3.clicked.connect(self.stackFastCharging)\n self.ui.btn_main_menu_4.clicked.connect(self.stackWalletServices)\n self.ui.btn_print_receipt_yes.clicked.connect(self.printReceipt)\n self.ui.btn_print_receipt_no.clicked.connect(self.stackStart)\n self.ui.btn_other_services_after_delivery.clicked.connect(self.stackWalletServices)\n self.ui.btn_no_exit_app_setting.clicked.connect(self.stackSetting)\n self.ui.btn_yes_exit_app_setting.clicked.connect(self.exitProgram)\n self.ui.btn_setting_start.clicked.connect(self.stackStart)\n self.ui.btn_setting_1.clicked.connect(self.stackDeviceMode)\n self.ui.btn_setting_5.clicked.connect(self.stackConveyorPort)\n self.ui.btn_setting_2.clicked.connect(self.stackPressMotor)\n # self.ui.btn_setting_10.clicked.connect(self.stackSeparationMotor)\n self.ui.btn_setting_3.clicked.connect(self.stackSensor1Ports)\n self.ui.btn_setting_9.clicked.connect(self.stackSensor2Ports)\n self.ui.btn_setting_6.clicked.connect(self.stackExitApp)\n self.ui.btn_wallet_services_1.clicked.connect(self.stackChargingResidentialUnit)\n self.ui.btn_wallet_services_2.clicked.connect(self.stackRFID)\n self.ui.btn_wallet_services_3.clicked.connect(self.stackCharity)\n self.ui.btn_wallet_services_4.clicked.connect(self.stackEnvirnmentalProtection)\n self.ui.btn_wallet_services_5.clicked.connect(self.stackWallet)\n self.ui.btn_plus_charity.clicked.connect(self.plusCharity)\n self.ui.btn_minus_charity.clicked.connect(self.minusCharity)\n self.ui.btn_plus_envirnmental_protection.clicked.connect(self.plusEnvirnment)\n self.ui.btn_minus_envirnmental_protection.clicked.connect(self.minusEnvirnment)\n self.ui.btn_plus_rfid.clicked.connect(self.plusRFID)\n self.ui.btn_minus_rfid.clicked.connect(self.minusRFID)\n self.ui.btn_confirm_transfer_to_RFIDcard.clicked.connect(self.transferToRFIDCard)\n\n self.ui.btn_charity_1.clicked.connect(lambda: self.ui.lbl_selected_charity.setText(self.ui.lbl_charity_1.text()))\n self.ui.btn_charity_2.clicked.connect(lambda: self.ui.lbl_selected_charity.setText(self.ui.lbl_charity_2.text()))\n self.ui.btn_charity_3.clicked.connect(lambda: self.ui.lbl_selected_charity.setText(self.ui.lbl_charity_3.text()))\n self.ui.btn_charity_4.clicked.connect(lambda: self.ui.lbl_selected_charity.setText(self.ui.lbl_charity_4.text()))\n \n self.ui.btn_envirnmental_protection_1.clicked.connect(lambda: self.ui.lbl_selected_envirnmental_protection.setText(self.ui.lbl_envirnmental_protection_1.text()))\n self.ui.btn_envirnmental_protection_2.clicked.connect(lambda: 
self.ui.lbl_selected_envirnmental_protection.setText(self.ui.lbl_envirnmental_protection_2.text()))\n self.ui.btn_envirnmental_protection_3.clicked.connect(lambda: self.ui.lbl_selected_envirnmental_protection.setText(self.ui.lbl_envirnmental_protection_3.text()))\n self.ui.btn_envirnmental_protection_4.clicked.connect(lambda: self.ui.lbl_selected_envirnmental_protection.setText(self.ui.lbl_envirnmental_protection_4.text()))\n\n self.ui.setWindowFlags(Qt.FramelessWindowHint|Qt.Dialog)\n self.ui.showMaximized()\n\n self.back_delivery_item_flag = False\n self.flag_system_startup_now = True\n\n self.delivery_state = 'none'\n\n # self.categories = Server.getCategories()\n self.image_classifier = ImageClassifier()\n self.predict_item_threshold = float(DataBase.select('predict_item_threshold'))\n self.initHardwares()\n self.readFile()\n self.stackSetting()\n self.playSound('audio2')\n self.refresh()\n\n def readFile(self):\n f = open('items.csv', encoding='utf-8')\n self.items = []\n for line in f:\n item = line.split(',')\n self.items.append({\n 'id':int(item[0]),\n 'category_id': int(item[1]),\n 'price': int(item[2]),\n 'name':item[3]\n })\n\n def initHardwares(self):\n\n try:\n if hasattr(self, 'press_motor'):\n self.press_motor.close()\n \n if hasattr(self, 'conveyor_motor'):\n self.conveyor_motor.close()\n\n if hasattr(self, 'distance_sensor1'):\n self.distance_sensor1.close()\n print(\"distance sensor 1 close\")\n \n if hasattr(self, 'distance_sensor2'):\n self.distance_sensor2.close()\n print(\"distance sensor 2 close\")\n \n except Exception as e:\n print(\"error:\", e)\n \n try:\n self.press_motor = Motor(name='press_motor', pin_factory=factory)\n \n self.setButton(self.ui.btn_press_motor_forward_on, function=self.press_motor.forward)\n self.setButton(self.ui.btn_press_motor_backward_on, function=self.press_motor.backward)\n self.setButton(self.ui.btn_press_motor_off, function=self.press_motor.stop)\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In press_motor initHardwares Method')\n\n try:\n\n # normal\n # self.conveyor_motor = Motor(name='conveyor_motor', pin_factory=factory)\n\n # red relay\n self.conveyor_motor = Motor(name='conveyor_motor', pin_factory=factory, active_high=True)\n \n self.conveyor_motor_time_2 = float(DataBase.select('conveyor_motor_time_2'))\n \n self.setButton(self.ui.btn_conveyor_motor_forward_on, function=self.conveyor_motor.forward)\n self.setButton(self.ui.btn_conveyor_motor_backward_on, function=self.conveyor_motor.backward)\n self.setButton(self.ui.btn_conveyor_motor_off, function=self.conveyor_motor.stop)\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In conveyor_motor initHardwares Method')\n \n try:\n distance_sensor1_trig_port = int(DataBase.select('distance_sensor1_trig_port'))\n distance_sensor1_echo_port = int(DataBase.select('distance_sensor1_echo_port'))\n distance_sensor1_threshold_distance = float(DataBase.select('distance_sensor1_threshold_distance'))\n self.distance_sensor1 = DistanceSensor(distance_sensor1_echo_port, distance_sensor1_trig_port, max_distance=1, threshold_distance=distance_sensor1_threshold_distance/100, pin_factory=factory)\n self.distance_sensor1.when_in_range = self.distanceSensor1WhenInRange\n self.distance_sensor1.when_out_of_range = self.distanceSensor1WhenOutOfRange\n print('distance sensor 1 ready')\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In distance_sensor1 initHardwares Method')\n\n try:\n 
distance_sensor2_trig_port = int(DataBase.select('distance_sensor2_trig_port'))\n distance_sensor2_echo_port = int(DataBase.select('distance_sensor2_echo_port'))\n distance_sensor2_threshold_distance = float(DataBase.select('distance_sensor2_threshold_distance'))\n self.distance_sensor2 = DistanceSensor(distance_sensor2_echo_port, distance_sensor2_trig_port, max_distance=1, threshold_distance=distance_sensor2_threshold_distance/100, pin_factory=factory)\n self.distance_sensor2.when_in_range = self.distanceSensor2WhenInRange\n self.distance_sensor2.when_out_of_range = self.distanceSensor2WhenOutOfRange\n print('distance sensor 2 ready')\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In distance_sensor2 initHardwares Method')\n\n try:\n if not hasattr(self, 'rfid_sensor'):\n self.rfid_sensor = SimpleMFRC522()\n print('RFID sensor ready')\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In rfid_sensor initHardwares Method')\n\n def setButton(self, button, function=None, text=None, icon=None, show=True):\n try:\n button.clicked.disconnect()\n except:\n pass\n finally:\n if function:\n button.clicked.connect(function)\n if text:\n button.setText(text)\n if icon:\n button.setIcon(QIcon(icon))\n if show:\n button.show()\n else:\n button.hide()\n\n def showNotification(self, text):\n self.ui.lbl_notification.setText(text)\n self.ui.lbl_notification.show()\n\n def hideNotification(self):\n self.ui.lbl_notification.hide()\n\n def playSound(self, path):\n try:\n path = os.path.join('sounds', path+'.mp3')\n if os.path.isfile(path):\n mixer.music.load(path)\n mixer.music.play()\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In playSound Method')\n\n def stopSound(self):\n try:\n mixer.music.stop()\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In stopSound Method')\n\n def makeGif(self):\n pngdir = 'images/slider'\n images = []\n kargs = { 'duration': 5 }\n for file_name in os.listdir(pngdir):\n if file_name.endswith('.JPG'):\n file_path = os.path.join(pngdir, file_name)\n # images.append(imageio.imread(file_path))\n # imageio.mimsave('animations/slider1.gif', images, 'GIF', **kargs)\n\n def loadingFail(self):\n self.ui.btn_refresh_loading.show()\n self.showNotification(SERVER_ERROR_MESSAGE)\n\n def refresh(self):\n self.showNotification(PLEASE_WAIT_MESSAGE)\n \n def showSoonNotification(self):\n self.showNotification(SOON_MESSAGE)\n\n def stackStart(self):\n self.setButton(self.ui.btn_left, show=False)\n self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_notification.hide()\n self.playSound('audio11')\n gif_start = QMovie(\"animations/slider1.gif\")\n self.ui.lbl_slider_start.setMovie(gif_start)\n gif_start.start()\n self.delivery_state = 'default'\n self.ui.Stack.setCurrentWidget(self.ui.pageStart)\n\n def stackMainMenu(self):\n self.setButton(self.ui.btn_left, function=self.signOutUser, text='خروج', icon='images/icon/log-out.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_notification.hide()\n self.stopSound()\n self.delivery_state = 'default'\n self.ui.Stack.setCurrentWidget(self.ui.pageMainMenu)\n\n def stackWalletServices(self):\n self.setButton(self.ui.btn_left, function=self.stackMainMenu, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_notification.hide()\n self.ui.Stack.setCurrentWidget(self.ui.pageWalletServices)\n\n def 
manualDeliveryRecycleItem(self):\n print('manualDeliveryRecycleItem')\n try: \n # self.showNotification(RECYCLE_MESSAGE)\n try:\n self.conveyor_motor.forward(timer=True)\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In press_motor_stop_timer startDeliveryItem Method')\n\n self.playSound('audio3')\n self.ui.btn_right.show()\n self.selected_item['count'] += 1\n self.ui.lbl_selected_item_count.setText(str(self.selected_item['count']))\n\n for user_item in self.user_items:\n if self.selected_item['id'] == user_item['id']:\n break\n else:\n self.user_items.append(self.selected_item)\n self.total_price = sum(user_item['price'] * user_item['count'] for user_item in self.user_items)\n self.ui.lbl_total.setText(str(self.total_price))\n\n try:\n self.press_motor.forward(timer=True)\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In press_motor_stop_timer startDeliveryItem Method')\n\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In endDeliveryItem Method')\n\n def stackManualDeliveryItems(self):\n self.setButton(self.ui.btn_left, show=False)\n self.setButton(self.ui.btn_right, function=self.stackAfterDelivery, text='پایان', icon='images/icon/tick.png', show=True)\n self.setButton(self.ui.btn_manual_delivery_recycle_item, function=self.manualDeliveryRecycleItem)\n # self.playSound('audio7')\n self.ui.lbl_total.setText(\"0\")\n self.ui.lbl_recycled_done.hide()\n self.user_items = []\n i = 0\n row = 0\n while row < len(self.items) // 2:\n for col in range(2):\n btn = QPushButton()\n self.items[i]['count'] = 0\n btn.setText(self.items[i]['name'])\n\n btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n btn.setStyleSheet('QPushButton:pressed {background-color: #6fdc89;border-style: inset;} QPushButton{background-color: #ffffff; border: 2px solid #28a745; border-radius: 10px; outline-style: none; font: 22pt \"IRANSansFaNum\"}')\n btn.setMinimumSize(250, 100)\n btn.clicked.connect(partial(self.SelectItem, self.items[i], btn))\n self.ui.gridLayout_6.addWidget(btn, row, col)\n i += 1\n if i >= len(self.items):\n break\n row += 1\n\n self.total_price = 0\n self.SelectItem(self.items[0], self.ui.gridLayout_6.itemAt(0))\n self.ui.Stack.setCurrentWidget(self.ui.pageManualDeliveryItems)\n \n def stackAutoDeliveryItems(self):\n self.setButton(self.ui.btn_left, show=False)\n self.setButton(self.ui.btn_right, function=self.stackAfterDelivery, text='پایان', icon='images/icon/tick.png', show=True)\n self.ui.lbl_notification.hide()\n self.ui.btn_setting.hide()\n # self.ui.list_auto_delivery_items.clear()\n self.ui.lbl_pixmap_category_1.setPixmap(QPixmap(\"images/item/category1.png\").scaledToHeight(128))\n self.ui.lbl_pixmap_category_3.setPixmap(QPixmap(\"images/item/category3.png\").scaledToHeight(128))\n self.ui.lbl_pixmap_category_4.setPixmap(QPixmap(\"images/item/category4.png\").scaledToHeight(128)) \n\n self.ui.lbl_num_category_1.setText(str(0))\n self.ui.lbl_num_category_3.setText(str(0))\n self.ui.lbl_num_category_4.setText(str(0))\n \n self.ui.lbl_total_price_auto_delivery_items.setText(str(0))\n \n self.total_price = 0\n\n self.playSound('audio7')\n self.delivery_state = 'ready'\n\n self.user_items = []\n for item in self.items:\n item['count'] = 0\n\n self.ui.Stack.setCurrentWidget(self.ui.pageAutoDeliveryItems)\n\n def distanceSensor1WhenInRange(self):\n print('distanceSensor1WhenInRange')\n if self.delivery_state != 'none':\n if self.device_mode == 'auto' and self.delivery_state == 
'default':\n self.delivery_state = 'ready'\n print('default to ready')\n self.stackAutoDeliveryItems()\n\n elif self.device_mode == 'manual' and self.delivery_state == 'default':\n self.delivery_state = 'ready'\n print(1)\n self.stackManualDeliveryItems()\n\n if self.device_mode == 'auto' and self.delivery_state != 'default':\n if self.delivery_state == 'ready':\n self.delivery_state = 'enter'\n print('delivery state changed: ready to enter')\n self.enterDeliveryItem()\n\n elif self.delivery_state == 'reject':\n self.delivery_state = 'pickup'\n print('delivery state changed: reject to pickup')\n self.pickupDeliveryItem()\n \n else:\n self.delivery_state = 'cancel'\n print('delivery state changed: cancel')\n self.cancelDeliveryItem()\n\n elif self.device_mode == 'manual':\n print(2)\n self.manualDeliveryRecycleItem()\n\n def distanceSensor1WhenOutOfRange(self):\n print('distanceSensor1WhenOutOfRange')\n if self.delivery_state == 'enter':\n self.delivery_state = 'recognize'\n print('delivery state changed: enter to recognize')\n self.startDeliveryItem()\n elif self.delivery_state == 'pickup':\n self.delivery_state = 'ready'\n print('delivery state changed: pickup to ready')\n\n def distanceSensor2WhenInRange(self):\n print('distanceSensor2WhenInRange')\n # if self.delivery_state == 'accept':\n # self.endDeliveryItem()\n\n def distanceSensor2WhenOutOfRange(self):\n print('distanceSensor2WhenOutOfRange')\n\n def pickupDeliveryItem(self):\n print('distanceSensor2WhenOutOfRange')\n try:\n self.cancel_delivery_item_timer.cancel()\n self.conveyor_motor.stop()\n except Exception as e:\n print(\"error:\", e)\n\n def enterDeliveryItem(self):\n print('enterDeliveryItem')\n try:\n self.conveyor_motor.forward(timer=True)\n\n except Exception as e:\n print(\"error:\", e)\n\n def startDeliveryItem(self):\n print('startDeliveryItem')\n try:\n self.conveyor_motor.stop()\n self.auto_delivery_items_thread.start()\n self.auto_delivery_items_timer = Timer(camera_time, self.validationDeliveryItem)\n self.auto_delivery_items_timer.start()\n self.cancel_delivery_item_timer = Timer(delivery_cancel_time, self.cancelDeliveryItem)\n self.cancel_delivery_item_timer.start()\n except Exception as e:\n print(\"error:\", e)\n\n def rejectDeliveryItem(self):\n print('rejectDeliveryItem')\n # self.showNotification(ITEM_NOT_RECOGNIZED_ERROR_MESSAGE)\n # sleep(0.01)\n self.conveyor_motor.backward(timer=True)\n\n def acceptDeliveryItem(self):\n print('acceptDeliveryItem')\n most_probability_item_index = stats.mode(self.auto_delivery_items_thread.predicted_items).mode[0]\n self.selected_item = self.items[most_probability_item_index]\n print('most probability item:', window.selected_item['name'])\n\n # self.ui.list_auto_delivery_items.insertItem(0, self.selected_item['name'])\n\n if self.selected_item['category_id'] == 1:\n self.ui.lbl_num_category_1.setText(str(int(self.ui.lbl_num_category_1.text()) + 1))\n elif self.selected_item['category_id'] == 3:\n self.ui.lbl_num_category_3.setText(str(int(self.ui.lbl_num_category_3.text()) + 1))\n elif self.selected_item['category_id'] == 4:\n self.ui.lbl_num_category_4.setText(str(int(self.ui.lbl_num_category_4.text()) + 1))\n # elif self.selected_item['category_id'] == 5:\n # self.ui.lbl_num_category_5.setText(str(int(self.ui.lbl_num_category_5.text()) + 1))\n\n self.total_price += int(self.selected_item['price'])\n # self.total_price = sum(user_item['price'] * user_item['count'] for user_item in self.user_items)\n 
self.ui.lbl_total_price_auto_delivery_items.setText(str(self.total_price))\n\n self.conveyor_motor.forward(timer=True)\n self.end_delivery_items_timer = Timer(self.conveyor_motor_time_2, self.endDeliveryItem)\n self.end_delivery_items_timer.start()\n self.delivery_state = 'end'\n # self.endDeliveryItem()\n\n def validationDeliveryItem(self):\n print('validationDeliveryItem')\n if self.delivery_state == 'recognize':\n self.delivery_state = 'validate'\n print('delivery state changed: recognize to validate')\n \n sleep(0.1)\n\n if len(self.auto_delivery_items_thread.predicted_items) > 0:\n self.delivery_state = 'accept'\n print('delivery state changed: validate to accept')\n self.acceptDeliveryItem()\n else:\n self.delivery_state = 'reject'\n print('delivery state changed: validate to reject')\n self.rejectDeliveryItem()\n\n def cancelDeliveryItem(self):\n # self.showNotification(DELIVERY_ERROR_MESSAGE)\n sleep(0.01)\n print('cancelDeliveryItem')\n self.conveyor_motor.stop()\n self.press_motor.stop()\n # self.separation_motor.stop()\n self.delivery_state = 'ready'\n print('delivery state changed: ready')\n\n def endDeliveryItem(self):\n print('endDeliveryItem')\n if self.delivery_state == 'end':\n try: \n # self.showNotification(RECYCLE_MESSAGE)\n sleep(0.01)\n self.cancel_delivery_item_timer.cancel()\n\n self.playSound('audio3')\n self.selected_item['count'] += 1\n self.ui.lbl_selected_item_count.setText(str(self.selected_item['count']))\n\n for user_item in self.user_items:\n if self.selected_item['id'] == user_item['id']:\n break\n else:\n self.user_items.append(self.selected_item)\n \n # self.conveyor_motor.stop()\n\n try:\n self.press_motor.forward(True)\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In press_motor_stop_timer startDeliveryItem Method')\n\n sleep(1)\n self.delivery_state = 'ready'\n\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In endDeliveryItem Method')\n\n def SelectItem(self, item, this_btn):\n self.selected_item = item\n self.selected_item['name'] = item['name']\n self.ui.lbl_selected_item.setText(self.selected_item['name'])\n self.ui.lbl_unit.setText(str(self.selected_item['price']))\n self.ui.lbl_selected_item_count.setText(str(self.selected_item['count']))\n # for btn in self.layout_FArea.findChildren(QPushButton):\n # btn.setStyleSheet('background-color: #ffffff; border: 2px solid #28a745; border-radius: 10px; outline-style: none; font: 24pt \"IRANSansFaNum\"')\n # this_btn.setStyleSheet('background-color: #28a745; color:#ffffff; border-radius: 10px; outline-style: none; font: 24pt \"IRANSansFaNum\"')\n \n def printReceipt(self):\n self.playSound('audio4')\n # printer = Usb(idVendor=0x0416, idProduct=0x5011, timeout=0, in_ep=0x81, out_ep=0x03)\n os.system('sudo -S python3 printer.py ' \n + str(self.total_price)\n + ' --datetime \"' + QDate.currentDate().toString(Qt.DefaultLocaleShortDate) + '-' + QTime.currentTime().toString(Qt.DefaultLocaleShortDate) + '\"')\n self.stackStart()\n\n def stackAfterDelivery(self):\n try:\n # self.total_price = sum(user_item['price'] * user_item['count'] for user_item in window.user_items) \n self.delivery_state = 'default'\n self.playSound('audio12')\n # self.setButton(self.ui.btn_left, show=False)\n # self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_notification.hide()\n # gif_afterDelivery = QMovie(\"animations/earth.gif\")\n # self.ui.lbl_gif_after_delivery.setMovie(gif_afterDelivery)\n # gif_afterDelivery.start()\n # 
self.ui.lbl_total_price.setText(str(self.total_price))\n self.printReceipt()\n except:\n self.showNotification(SERVER_ERROR_MESSAGE)\n ErrorLog.writeToFile('Server Error Message')\n \n try:\n self.press_motor.stop()\n self.conveyor_motor.stop()\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In stackAfterDelivery Method')\n\n def fastChargingDeliveryRecycleItem(self):\n pass\n\n def stackFastCharging(self):\n self.setButton(self.ui.btn_left, function=self.stackMainMenu, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.setButton(self.ui.btn_recycle_item_fast_charging, function=self.fastChargingDeliveryRecycleItem)\n self.ui.lbl_recycled_done_fast_charging.hide()\n self.ui.tb_unit_fast_charging.setText('')\n self.ui.tb_weight_fast_charging.setText('')\n\n self.layout_SArea_FastCharging = QGridLayout()\n for row in range(4):\n for col in range(2):\n btn = QPushButton()\n #self.items[i]['count'] = 0\n btn.setText('آهن')\n btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n btn.setMinimumSize(250, 100)\n btn.setStyleSheet('QPushButton:pressed { background-color: #9caf9f } QPushButton{ background-color: #ffffff} QPushButton{ border: 2px solid #28a745} QPushButton{ border-radius: 10px} QPushButton{ font: 24pt \"IRANSans\"} QPushButton{ font: 24pt \"IRANSansFaNum\"} QPushButton{ color: #000000}')\n #btn.clicked.connect(partial(self.SelectItem, self.items[i]))\n self.layout_SArea_FastCharging.addWidget(btn, row, col)\n\n #self.SelectItem(self.items[0])\n self.ui.scroll_area_widget_fast_charging.setLayout(self.layout_SArea_FastCharging)\n self.ui.Stack.setCurrentWidget(self.ui.pageFastDelivery)\n\n def stackWallet(self):\n self.setButton(self.ui.btn_left, function=self.stackWalletServices, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_notification.hide()\n gif_wallet = QMovie(\"animations/wallet.gif\")\n gif_wallet.setScaledSize(QSize().scaled(256, 256, Qt.KeepAspectRatio))\n self.ui.lbl_gif_wallet.setMovie(gif_wallet)\n gif_wallet.start()\n self.ui.lbl_wallet.setText(str((\"{:,.0f}\").format(self.user['wallet'])))\n self.ui.Stack.setCurrentWidget(self.ui.pageWallet)\n\n def stackChargingResidentialUnit(self):\n self.setButton(self.ui.btn_left, function=self.stackWalletServices, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.ui.lbl_user_address.setText(self.user['address'])\n print(self.user['address'])\n sp_retain = QSizePolicy()\n sp_retain.setRetainSizeWhenHidden(True)\n self.ui.tb_user_new_address.setSizePolicy(sp_retain)\n self.ui.btn_changed_user_address.setSizePolicy(sp_retain)\n self.ui.tb_user_new_address.hide()\n self.ui.btn_changed_user_address.hide()\n self.ui.btn_edit_user_address.clicked.connect(self.editUserAddress)\n self.ui.Stack.setCurrentWidget(self.ui.pageChargingResidentialUnit)\n\n def editUserAddress(self):\n self.ui.tb_user_new_address.show()\n self.ui.btn_changed_user_address.show()\n\n def transferToRFIDCard(self):\n self.showNotification(TRANSFER_TO_RFID_MESSAGE)\n self.rfid_thread.start()\n\n def successTransferToRFIDCard(self):\n self.stackWalletServices()\n self.showNotification(SUCCESS_TRANSFER_TO_RFID_MESSAGE)\n\n def plusRFID(self):\n if self.user_wallet < int(self.ui.lbl_payment_rfid.text()):\n self.showNotification(MONEY_ERROR_MESSAGE)\n else:\n self.ui.lbl_transfer_to_rfid.setText(str(int(self.ui.lbl_transfer_to_rfid.text()) 
+ self.money_RFID))\n self.user_wallet -= self.money_RFID\n self.ui.lbl_total_wallet_rfid.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n\n def minusRFID(self):\n if int(self.ui.lbl_transfer_to_rfid.text()) > 0:\n self.ui.lbl_transfer_to_rfid.setText(str(int(self.ui.lbl_transfer_to_rfid.text()) - self.money_RFID))\n self.user_wallet += self.money_RFID\n self.ui.lbl_total_wallet_rfid.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n else:\n print('End of minus operations')\n\n def stackRFID(self):\n self.setButton(self.ui.btn_left, function=self.stackWalletServices, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, show=False)\n self.hideNotification()\n\n self.user_wallet = self.user['wallet']\n self.money_RFID = int(self.ui.lbl_payment_rfid.text())\n\n self.ui.lbl_transfer_to_rfid.setText('0')\n self.ui.lbl_total_wallet_rfid.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n self.ui.Stack.setCurrentWidget(self.ui.pageRFID)\n\n def plusCharity(self):\n if self.user_wallet < int(self.ui.lbl_payment_charity.text()):\n self.showNotification(MONEY_ERROR_MESSAGE)\n else:\n self.ui.lbl_deposit_price_charity_organization.setText(str(int(self.ui.lbl_deposit_price_charity_organization.text()) + self.money_charity_organization))\n self.user_wallet -= self.money_charity_organization\n self.ui.lbl_total_price_charity.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n\n def minusCharity(self):\n if int(self.ui.lbl_deposit_price_charity_organization.text()) > 0:\n self.ui.lbl_deposit_price_charity_organization.setText(str(int(self.ui.lbl_deposit_price_charity_organization.text()) - self.money_charity_organization))\n self.user_wallet += self.money_charity_organization\n self.ui.lbl_total_price_charity.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n else:\n print('End of minus operations')\n\n def stackCharity(self):\n self.setButton(self.ui.btn_left, function=self.stackWalletServices, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, text='تایید', icon='images/icon/tick.png', show=True)\n self.hideNotification()\n\n self.user_wallet = self.user['wallet']\n self.money_charity_organization = int(self.ui.lbl_payment_charity.text())\n\n self.ui.lbl_deposit_price_charity_organization.setText('0')\n self.ui.lbl_total_price_charity.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n self.ui.lbl_selected_charity.setText(self.ui.lbl_charity_1.text())\n self.ui.Stack.setCurrentWidget(self.ui.pageCharity)\n\n def plusEnvirnment(self):\n if self.user_wallet < int(self.ui.lbl_payment_envirnmental_protection.text()):\n self.showNotification(MONEY_ERROR_MESSAGE)\n else:\n self.ui.lbl_deposit_price_environmental_organization.setText(str(int(self.ui.lbl_deposit_price_environmental_organization.text()) + self.money_envirnmental_organization))\n self.user_wallet -= self.money_envirnmental_organization\n self.ui.lbl_total_price_envirnmental_protection.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n\n def minusEnvirnment(self):\n if int(self.ui.lbl_deposit_price_environmental_organization.text()) > 0:\n self.ui.lbl_deposit_price_environmental_organization.setText(str(int(self.ui.lbl_deposit_price_environmental_organization.text()) - self.money_envirnmental_organization))\n self.user_wallet += self.money_envirnmental_organization\n self.ui.lbl_total_price_envirnmental_protection.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n else:\n print('End of minus operations')\n\n def stackEnvirnmentalProtection(self):\n 
self.setButton(self.ui.btn_left, function=self.stackWalletServices, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, text='تایید', icon='images/icon/tick.png', show=True)\n self.hideNotification()\n\n self.user_wallet = self.user['wallet']\n self.money_envirnmental_organization = int(self.ui.lbl_payment_envirnmental_protection.text())\n\n self.ui.lbl_deposit_price_environmental_organization.setText('0')\n self.ui.lbl_total_price_envirnmental_protection.setText(str(\"{:,.0f}\".format(self.user_wallet)))\n self.ui.lbl_selected_envirnmental_protection.setText(self.ui.lbl_envirnmental_protection_1.text())\n\n self.ui.Stack.setCurrentWidget(self.ui.pageEnvirnmentalProtection)\n\n def stackSetting(self):\n self.setButton(self.ui.btn_left, function=self.stackStart, text='بازگشت', icon='images/icon/back.png', show=True)\n self.setButton(self.ui.btn_right, function=self.saveSetting, text='ذخیره', icon='images/icon/save.png', show=True)\n \n result = DataBase.select('device_mode')\n if result == 'manual':\n self.ui.rb_manual_device_mode_setting.setChecked(True)\n elif result == 'auto':\n self.ui.rb_auto_device_mode_setting.setChecked(True)\n\n self.ui.lbl_notification.hide()\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingEmpty)\n self.ui.Stack.setCurrentWidget(self.ui.pageSetting)\n\n def stackDisableDevice(self):\n self.ui.btn_left.hide()\n self.ui.btn_right.hide()\n self.ui.lbl_notification.hide()\n self.ui.Stack.setCurrentWidget(self.ui.pageDisableDevice)\n\n def checkDeviceMode(self):\n if self.device_mode == 'manual':\n self.stackManualDeliveryItems()\n elif self.device_mode == 'auto':\n self.stackAutoDeliveryItems()\n \n def stackDeviceMode(self):\n result = DataBase.select('device_mode')\n if result == 'manual':\n self.ui.rb_manual_device_mode_setting.setChecked(True)\n elif result == 'auto':\n self.ui.rb_auto_device_mode_setting.setChecked(True)\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingDeviceMode)\n\n def stackExitApp(self):\n self.ui.lbl_notification.hide()\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingExit)\n\n def stackPressMotor(self):\n self.ui.lbl_notification.hide()\n self.ui.tb_press_motor_forward_port.setText(str(DataBase.select('press_motor_forward_port')))\n self.ui.tb_press_motor_backward_port.setText(str(DataBase.select('press_motor_backward_port')))\n self.ui.tb_press_motor_time.setText(str(DataBase.select('press_motor_time')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingPressMotor)\n\n def stackSeparationMotor(self):\n self.ui.lbl_notification.hide()\n # self.ui.tb_separation_motor_forward_port.setText(str(DataBase.select('separation_motor_forward_port')))\n # self.ui.tb_separation_motor_backward_port.setText(str(DataBase.select('separation_motor_backward_port')))\n # self.ui.tb_separation_motor_time.setText(str(DataBase.select('separation_motor_time')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingSeparationMotor)\n\n def stackSensor1Ports(self):\n self.ui.lbl_notification.hide()\n self.ui.tb_sensor1_trig_port.setText(str(DataBase.select('distance_sensor1_trig_port')))\n self.ui.tb_sensor1_echo_port.setText(str(DataBase.select('distance_sensor1_echo_port')))\n self.ui.tb_sensor1_depth_threshold.setText(str(DataBase.select('distance_sensor1_threshold_distance')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingDistanceSensor1)\n\n def stackSensor2Ports(self):\n self.ui.lbl_notification.hide()\n 
self.ui.tb_sensor2_trig_port.setText(str(DataBase.select('distance_sensor2_trig_port')))\n self.ui.tb_sensor2_echo_port.setText(str(DataBase.select('distance_sensor2_echo_port')))\n self.ui.tb_sensor2_depth_threshold.setText(str(DataBase.select('distance_sensor2_threshold_distance')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingDistanceSensor2)\n\n def stackConveyorPort(self):\n self.ui.lbl_notification.hide()\n self.ui.tb_conveyor_motor_forward_port.setText(str(DataBase.select('conveyor_motor_forward_port')))\n self.ui.tb_conveyor_motor_backward_port.setText(str(DataBase.select('conveyor_motor_backward_port')))\n self.ui.tb_conveyor_motor_time_1.setText(str(DataBase.select('conveyor_motor_time')))\n self.ui.tb_conveyor_motor_time_2.setText(str(DataBase.select('conveyor_motor_time_2')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingConveyorMotor)\n\n def stackAddOpetator(self):\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingAddOperator)\n\n def stackHelp(self):\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingHelp)\n self.ui.lbl_version.setText(DEVICE_VERSION.format(self.device_version))\n self.ui.lbl_version.show()\n\n def stackLicense(self):\n self.ui.tb_license.setText(str(DataBase.select('app_version')))\n self.ui.StackSetting.setCurrentWidget(self.ui.pageSettingLicense)\n\n def changePredictItemFlag(self, value):\n self.predict_item_flag = value\n self.ui.lblDeliveryItems.clear()\n\n def saveSetting(self):\n self.showNotification(SETTING_SAVE_MESSAGE)\n if self.ui.rb_manual_device_mode_setting.isChecked():\n DataBase.update('device_mode', 'manual')\n elif self.ui.rb_auto_device_mode_setting.isChecked():\n DataBase.update('device_mode', 'auto')\n self.device_mode = DataBase.select('device_mode')\n \n if self.ui.tb_sensor1_trig_port.text() != '':\n result = DataBase.update('distance_sensor1_trig_port', self.ui.tb_sensor1_trig_port.text())\n if self.ui.tb_sensor1_echo_port.text() != '':\n result = DataBase.update('distance_sensor1_echo_port', self.ui.tb_sensor1_echo_port.text())\n if self.ui.tb_sensor1_depth_threshold.text() != '':\n result = DataBase.update('distance_sensor1_threshold_distance', self.ui.tb_sensor1_depth_threshold.text())\n\n if self.ui.tb_sensor2_trig_port.text() != '':\n result = DataBase.update('distance_sensor2_trig_port', self.ui.tb_sensor2_trig_port.text())\n if self.ui.tb_sensor2_echo_port.text() != '':\n result = DataBase.update('distance_sensor2_echo_port', self.ui.tb_sensor2_echo_port.text())\n if self.ui.tb_sensor2_depth_threshold.text() != '':\n result = DataBase.update('distance_sensor2_threshold_distance', self.ui.tb_sensor2_depth_threshold.text())\n\n if self.ui.tb_press_motor_forward_port.text() != '':\n result = DataBase.update('press_motor_forward_port', self.ui.tb_press_motor_forward_port.text())\n if self.ui.tb_press_motor_backward_port.text() != '':\n result = DataBase.update('press_motor_backward_port', self.ui.tb_press_motor_backward_port.text())\n if self.ui.tb_press_motor_time.text() != '':\n result = DataBase.update('press_motor_time', self.ui.tb_press_motor_time.text())\n \n # if self.ui.tb_separation_motor_forward_port.text() != '':\n # result = DataBase.update('separation_motor_forward_port', self.ui.tb_separation_motor_forward_port.text())\n # if self.ui.tb_separation_motor_backward_port.text() != '':\n # result = DataBase.update('separation_motor_backward_port', self.ui.tb_separation_motor_backward_port.text())\n # if self.ui.tb_separation_motor_time.text() != '':\n # result = 
DataBase.update('separation_motor_time', self.ui.tb_separation_motor_time.text())\n \n if self.ui.tb_conveyor_motor_forward_port.text() != '':\n result = DataBase.update('conveyor_motor_forward_port', self.ui.tb_conveyor_motor_forward_port.text())\n if self.ui.tb_conveyor_motor_backward_port.text() != '':\n result = DataBase.update('conveyor_motor_backward_port', self.ui.tb_conveyor_motor_backward_port.text())\n if self.ui.tb_conveyor_motor_time_1.text() != '':\n result = DataBase.update('conveyor_motor_time', self.ui.tb_conveyor_motor_time_1.text())\n if self.ui.tb_conveyor_motor_time_2.text() != '':\n result = DataBase.update('conveyor_motor_time_2', self.ui.tb_conveyor_motor_time_2.text())\n \n self.initHardwares()\n\n def exitProgram(self):\n # Server.turnOffSystemSMS(self.owner, self.system)\n self.delivery_item_flag = False\n self.close()\n QApplication.quit()\n\n\nif __name__ == '__main__':\n\n os.environ[\"QT_QPA_FB_FORCE_FULLSCREEN\"] = \"0\"\n os.environ[\"QT_IM_MODULE\"] = \"qtvirtualkeyboard\"\n os.environ[\"QT_QPA_FONTDIR\"] = \"/fonts\"\n # os.environ[\"ESCPOS_CAPABILITIES_FILE\"] = \"/usr/python-escpos/capabilities.json\"\n mixer.init()\n qr = qrcode.QRCode(version=2, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n try:\n factory = NativeFactory()\n except Exception as e:\n print(\"error:\", e)\n ErrorLog.writeToFile(str(e) + ' In NativeFactory')\n\n app = QApplication(sys.argv)\n window = MainWindow()\n #timer = QTimer()\n #timer.start(10000) #it's aboat 1 seconds\n app.exec_()\n" ]
[ [ "scipy.stats.mode" ] ]
alkaren/MaskDetector
[ "e992796823e5d7b6941cdd9dd5df6e5c4f9e1ee4" ]
[ "source/video_detector.py" ]
[ "import time\nimport numpy as np\nimport cv2\nimport imutils\nimport os\nimport threading\n\nfrom datetime import date, datetime\nfrom imutils.video import VideoStream\nfrom tensorflow import keras\nfrom tensorflow.python.keras.applications.mobilenet_v2 import preprocess_input\n\nfrom source.utils import preprocess_face_frame, decode_prediction, write_bb, load_cascade_detector, check_count_image\nfrom flask import render_template, Response, flash\n\nmodel = keras.models.load_model('models/mask_mobilenet.h5')\n# model = keras.models.load_model('models/old_model.h5')\n\n# model = keras.models.load_model('testmodel/model.h5')\nface_detector = load_cascade_detector()\n\nglobal asdas\nasdas = check_count_image()\n\nstatusNotif = 0\nstatusSave = 0\nstatusThread = False\n\ndef video_mask_detector():\n video = VideoStream(src=0).start()\n # video = cv2.VideoCapture('test.mp4')\n time.sleep(1.0)\n while True:\n # Capture frame-by-frame\n frame = video.read()\n\n frame = detect_mask_in_frame(frame)\n # Display the resulting frame\n # show the output frame\n cv2.imshow(\"Mask detector\", frame)\n\n key = cv2.waitKey(1) & 0xFF\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n # cleanup\n cv2.destroyAllWindows()\n video.stop()\n\n\ndef detect_mask_in_frame(frame):\n frame = imutils.resize(frame, width=700)\n global statusNotif\n global statusSave\n global asdas\n today = date.today()\n currentTime = datetime.now().strftime(\"%H-%M\")\n pathDefault = \"G:/skripsi/MaskDetector/app/static/gambar_wajah/\"\n checkFolderToday = os.path.isdir(pathDefault + str(today))\n # print(\"folder hari ini =\", checkFolderToday)\n if checkFolderToday == False:\n NewPath = os.path.join(pathDefault, str(today))\n NewPath2 = os.path.join(pathDefault + str(today), \"noMask\")\n NewPath3 = os.path.join(pathDefault + str(today), \"withMask\")\n os.mkdir(NewPath)\n os.mkdir(NewPath2)\n os.mkdir(NewPath3)\n\n # convert an image from one color space to another\n # (to grayscale)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = face_detector.detectMultiScale(gray,\n scaleFactor=1.05,\n minNeighbors=5,\n minSize=(70, 70),\n maxSize=(300, 300),\n flags=cv2.CASCADE_SCALE_IMAGE,\n )\n counterObjectMask = 0\n \n faces_dict = {\"faces_list\": [],\n \"faces_rect\": []\n }\n\n SumFaces = len(faces)\n if (SumFaces == 0):\n statusNotif = 0\n statusSave = 0\n asdas = check_count_image()\n print(\"tidak ada wajah\")\n for rect in faces:\n (x, y, w, h) = rect\n face_frame = frame[y:y + h, x:x + w]\n # preprocess image\n face_frame_prepared = preprocess_face_frame(face_frame)\n\n faces_dict[\"faces_list\"].append(face_frame_prepared)\n faces_dict[\"faces_rect\"].append(rect)\n\n if faces_dict[\"faces_list\"]:\n faces_preprocessed = preprocess_input(np.array(faces_dict[\"faces_list\"]))\n preds = model.predict(faces_preprocessed)\n # SumFaces = len(faces_dict[\"faces_rect\"])\n # print(SumFaces)\n \n # print(asdas[1])\n counterObjectNoMask = asdas[1]\n\n for i, pred in enumerate(preds):\n\n strTime = str(today)\n varNoMask = currentTime + \"-NoMask-\"\n # print(varNoMask)\n varMask = currentTime + \"-WithMask-\"\n path = 'G:/skripsi/MaskDetector/app/static/gambar_wajah/' + strTime\n crop_img = faces_dict[\"faces_list\"][i]\n mask_or_not, confidence = decode_prediction(pred)\n # write_bb(mask_or_not, confidence, faces_dict[\"faces_rect\"][i], frame)\n FloatConfidence = float(confidence)\n #print(FloatConfidence)\n if mask_or_not == \"No mask\":\n # print(obj)\n\n counterObjectNoMask = 
counterObjectNoMask + 1\n print(counterObjectNoMask)\n statusNotif = 1\n \n cv2.imwrite(os.path.join(path+str(\"/noMask\"), str(counterObjectNoMask) + '.png'), crop_img) \n write_bb(mask_or_not, counterObjectNoMask, faces_dict[\"faces_rect\"][i], frame)\n # write_bb(mask_or_not, confidence, faces_dict[\"faces_rect\"][i], frame)\n # print(faces_dict[\"faces_rect\"][i])\n #listToStr = ' '.join(map(str, faces_dict[\"faces_rect\"][i]))\n # cv2.imwrite(os.path.join(path+str(\"/noMask\"), str(varNoMask) + str(listToStr.replace(\" \",\"\")) + '.png'), crop_img)\n # if statusSave == 0:\n # cv2.imwrite(os.path.join(path+str(\"/noMask\"), str(varNoMask) + str(counterObjectNoMask) + '.png'), crop_img) \n # write_bb(mask_or_not, confidence, faces_dict[\"faces_rect\"][i], frame)\n # else:\n # write_bb(mask_or_not, confidence, faces_dict[\"faces_rect\"][i], frame)\n \n # if statusThread == False:\n # print(statusSave)\n # threading.Timer(20.0, stop_save).start()\n # statusThread == True\n\n else:\n counterObjectMask = counterObjectMask + 1\n statusNotif = 0\n listToStr = ' '.join(map(str, faces_dict[\"faces_rect\"][i]))\n # cv2.imwrite(os.path.join(path+str(\"/withMask\"), str(varMask) +str(listToStr.replace(\" \",\"\")) + '.png'), crop_img)\n cv2.imwrite(os.path.join(path+str(\"/withMask\"), str(counterObjectMask) + '.png'), crop_img)\n write_bb(mask_or_not, confidence, faces_dict[\"faces_rect\"][i], frame)\n \n # time.sleep(10)\n # event.wait\n\n return frame\n\ndef print_notif():\n global statusNotif\n # print(statusNotif)\n if statusNotif == 1:\n # print(statusNotif)\n return statusNotif\n return statusNotif\n\ndef stop_save():\n global statusSave\n # print(statusNotif)\n if statusSave == 0:\n print(\"Pause Save!\")\n statusSave = 1\n # print(statusSave)\n continue_save()\n return statusSave\n return statusSave\n\ndef continue_save():\n global statusSave\n global statusThread\n print(\"Continue Save!\")\n statusSave = 0\n statusThread = False\n return statusSave, statusThread\n\n\nclass obj():\n def __init__(self, objectid):\n self.objectid = objectid\n\nif __name__ == '__main__':\n video_mask_detector()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.array" ] ]
MatthiasJakobs/tsx
[ "8a686ffd0af2f9f826d9ce11349e0fa0e883e897", "8a686ffd0af2f9f826d9ce11349e0fa0e883e897" ]
[ "tsx/models/classifier/fcn.py", "tsx/examples/rocket_native_guide.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom tsx.models.classifier import BasePyTorchClassifier\n\nclass TimeSeries1DNet(BasePyTorchClassifier):\n\n def __init__(self, input_size=1, kernel_size=7, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = self._conv1dblock(input_size, 32, kernel_size, padding=kernel_size // 2)\n self.conv2 = self._conv1dblock(32, 64, kernel_size, padding=kernel_size // 2)\n self.conv3 = self._conv1dblock(64, 128, kernel_size, padding=kernel_size // 2)\n\n self.avg_pool = nn.AvgPool1d(80)\n self.flatten = nn.Flatten()\n self.dense = nn.Linear(128, self.n_classes)\n\n def _conv1dblock(self, in_features, out_features, kernel_size=3, padding=0):\n return nn.Sequential(\n nn.Conv1d(in_features, out_features, kernel_size=kernel_size, padding=padding),\n nn.BatchNorm1d(out_features),\n nn.ReLU()\n )\n\n def reset_gradients(self):\n self.conv1[0].zero_grad()\n self.conv1[1].zero_grad()\n self.conv2[0].zero_grad()\n self.conv2[1].zero_grad()\n self.conv3[0].zero_grad()\n self.conv3[1].zero_grad()\n self.dense.zero_grad()\n\n def preprocessing(self, x_train, y_train, X_test=None, y_test=None):\n return x_train, y_train, X_test, y_test\n\n def forward(self, x, return_features=False):\n feats = self.get_features(x)\n\n x = self.avg_pool(feats)\n x = self.flatten(x)\n\n x = self.dense(x)\n if return_features:\n return x, feats\n return x\n\n def predict(self, X):\n prediction = nn.functional.softmax(self.forward(X), dim=-1)\n return torch.argmax(prediction, axis=-1).squeeze()\n\n def get_features(self, x, numpy=False):\n features = self.conv3(self.conv2(self.conv1(x))) \n if numpy:\n return features.detach().numpy()\n return features\n\n def get_logits(self, x, numpy=False):\n if numpy:\n return self.forward(x).detach().numpy()\n else:\n return self.forward(x)\n\n def get_class_weights(self, numpy=False):\n w = self.dense.weight.clone()\n w = w.detach()\n if numpy:\n return w.numpy()\n return w\n", "import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nfrom tsx.models.classifier import ROCKET\nfrom tsx.datasets import load_itapowdem\nfrom tsx.counterfactuals import NativeGuide\n\nds = load_itapowdem()\nx_train, y_train = ds.torch(train=True)\nx_test, y_test = ds.torch(train=False)\n\nmodel = ROCKET(input_length=x_train.shape[-1], batch_size=100, n_classes=len(np.unique(y_train)))\nmodel.fit(x_train, y_train, x_test, y_test)\n\ncf = NativeGuide(model, x_train, y_train, distance='euclidian', batch_size=1000)\n\nprint(\"Original classes of input: {}\".format(y_test[0:2]))\n\n# Get two counterfactuals for each datapoint\ngenerated_cfs = cf.generate(x_test[0:2], y_test[0:2], n=2)\n\nplt.figure()\nfor i in range(len(generated_cfs)):\n plt.subplot(1,2,i+1)\n plt.plot(x_test[i].squeeze(), color='green')\n print(generated_cfs[i][0][1].shape)\n plt.plot(generated_cfs[i][0][1], color='red')\n\nplt.show()\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Flatten", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.AvgPool1d", "torch.argmax" ], [ "numpy.unique", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
javicodema/tensorflow_course
[ "35ef9d3f9413a04dbcd5946bdaa17fba02088ab9" ]
[ "catsDogsClassifying.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport logging\n\nlogger = tf.get_logger()\nlogger.setLevel(logging.ERROR)\n\n_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'\nzip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)\n\nzip_dir_base = os.path.dirname(zip_dir)\nbase_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\ntrain_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures\n\nnum_cats_tr = len(os.listdir(train_cats_dir))\nnum_dogs_tr = len(os.listdir(train_dogs_dir))\n\nnum_cats_val = len(os.listdir(validation_cats_dir))\nnum_dogs_val = len(os.listdir(validation_dogs_dir))\n\ntotal_train = num_cats_tr + num_dogs_tr\ntotal_val = num_cats_val + num_dogs_val\nprint('total training cat images:', num_cats_tr)\nprint('total training dog images:', num_dogs_tr)\n\nprint('total validation cat images:', num_cats_val)\nprint('total validation dog images:', num_dogs_val)\nprint(\"--\")\nprint(\"Total training images:\", total_train)\nprint(\"Total validation images:\", total_val)\n\nBATCH_SIZE = 100 # Number of training examples to process before updating our models variables\nIMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels\n\ntrain_image_generator = ImageDataGenerator(rescale=1. / 255) # Generator for our training data\nvalidation_image_generator = ImageDataGenerator(rescale=1. 
/ 255) # Generator for our validation data\n\ntrain_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_SHAPE, IMG_SHAPE), # (150,150)\n class_mode='binary')\n\nval_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE,\n directory=validation_dir,\n shuffle=False,\n target_size=(IMG_SHAPE, IMG_SHAPE), # (150,150)\n class_mode='binary')\n\nsample_training_images, _ = next(train_data_gen)\n\n\n# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.\ndef plotImages(images_arr):\n fig, axes = plt.subplots(1, 5, figsize=(20, 20))\n axes = axes.flatten()\n for img, ax in zip(images_arr, axes):\n ax.imshow(img)\n plt.tight_layout()\n plt.show()\n\n\nplotImages(sample_training_images[:5]) # Plot images 0-4\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(2)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.summary()\n\nEPOCHS = 100\nhistory = model.fit_generator(\n train_data_gen,\n steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),\n epochs=EPOCHS,\n validation_data=val_data_gen,\n validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))\n)\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(EPOCHS)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.savefig('./foo.png')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "matplotlib.pyplot.title", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.subplots", "tensorflow.get_logger", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "tensorflow.keras.layers.MaxPooling2D", "matplotlib.pyplot.subplot", "tensorflow.keras.utils.get_file", "matplotlib.pyplot.show", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.figure" ] ]
Antimortine/made_nlp_course
[ "2094e02751462f292d9dec75d02ad8c0672eda9b" ]
[ "homeworks/homework03/utils.py" ]
[ "import glob\nimport os\nimport tensorflow as tf\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import log_softmax\n\n\ndef remove_tech_tokens(mystr, tokens_to_remove=['<eos>', '<sos>', '<unk>', '<pad>']):\n return [x for x in mystr if x not in tokens_to_remove]\n\n\ndef get_text(x, TRG_vocab):\n text = [TRG_vocab.itos[token] for token in x]\n try:\n end_idx = text.index('<eos>')\n text = text[:end_idx]\n except ValueError:\n pass\n text = remove_tech_tokens(text)\n if len(text) < 1:\n text = []\n return text\n\n\ndef beam_search_for_sample(model, src, trg, eos, k=2):\n # src = [src sent len, batch size = 1]\n # trg = [trg sent len, batch size = 1]\n assert src.shape[1] == 1, \"Batch size must be equal to 1\"\n \n model.eval()\n max_len = trg.shape[0]\n trg_vocab_size = model.decoder.output_dim\n \n with torch.no_grad():\n # encoder_states = [batch size = 1, src sent len, dimensions = n layers * hid dim]\n # hidden = [n layers * n directions, batch size = 1, hid dim]\n encoder_states, hidden = model.apply_encoder(src)\n\n # first input to the decoder is the <sos> tokens\n input = trg[0,:]\n \n # output = [batch size = 1, output dim]\n # hidden = [n layers * n directions, batch size = 1, hid dim]\n output, hidden = model.decoder(input, hidden, encoder_states)\n output = log_softmax(output, dim=1) # output = [batch size = 1, output dim]\n top_pred = torch.topk(output, k, dim=1)\n \n encoder_states = torch.cat([encoder_states] * k, dim=0)\n \n # topk_log_probas = [batch size = 1, k]\n # top_k_outputs = [batch size = 1, k]\n topk_log_probas, top_k_outputs = top_pred.values, top_pred.indices\n \n tokens = torch.full((max_len, k*(k+1)), 0, dtype=torch.long, device=model.device)\n hidden = torch.cat([hidden] * k, dim=1) # [n layers * n directions, k, hid dim]\n scores = torch.zeros(k*(k+1), device=model.device)\n \n tokens[1,:k] = top_k_outputs[0]\n scores[:k] = topk_log_probas[0]\n \n for i in range(k):\n tokens[1, (i+1)*k:(i+2)*k] = tokens[1, i]\n\n for t in range(2, max_len):\n inputs = tokens[t-1,:k] # [k]\n \n # output = [k, output dim]\n # hidden = [n layers * n directions, k, hid dim]\n output, hidden = model.decoder(inputs, hidden, encoder_states)\n output = log_softmax(output, dim=1) # [k, output dim]\n top_pred = torch.topk(output, k, dim=1)\n \n # topk_log_probas = [batch size = k, top = k]\n # top_k_outputs = [batch size = k, top = k]\n topk_log_probas, top_k_outputs = top_pred.values, top_pred.indices\n tokens[t, k:] = top_k_outputs.view(-1)\n \n new_scores = (scores[:k].unsqueeze(1) * (t-1) + topk_log_probas) / t\n has_eos = (tokens[:t, k:] == eos).any(axis=0)\n scores[k:] = torch.where(has_eos, scores[k:], new_scores.view(-1))\n \n topk_indices = torch.topk(scores[k:], k).indices + k\n tokens[:,:k] = tokens[:,topk_indices]\n hidden = hidden.permute(1,0,2)\n hidden = hidden[(topk_indices - k) // k]\n hidden = hidden.permute(1,0,2).contiguous()\n scores[:k] = scores[topk_indices]\n\n for i in range(k):\n tokens[:, (i+1)*k:(i+2)*k] = tokens[:, i].unsqueeze(1)\n top1_index = scores[:k].max(0)[1]\n return tokens[:, top1_index]\n\n \ndef beam_search(model, src, trg, target_vocab, k=2):\n # src = [src sent len, batch size]\n # trg = [trg sent len, batch size]\n eos = target_vocab.stoi['<eos>']\n pred_tokens = [beam_search_for_sample(model, src[:,i,None], trg[:,i,None], eos, k).unsqueeze(1)\n for i in range(src.shape[1])]\n return torch.cat(pred_tokens, dim=1) # [trg sent len, batch 
size]\n\n\ndef generate_translation(src, trg, model, TRG_vocab, beam_widths=None):\n model.eval()\n beam_widths = beam_widths or []\n \n tabs = 28\n original = get_text(list(trg[:,0].cpu().numpy()), TRG_vocab)\n print('Original:\\t{}'.format(' '.join(original)).expandtabs(tabs))\n \n with torch.no_grad():\n output = model(src, trg, 0) # turn off teacher forcing\n output = output.argmax(dim=-1).cpu().numpy()\n \n generated = get_text(list(output[1:, 0]), TRG_vocab)\n print('Generated (Greedy):\\t{}'.format(' '.join(generated)).expandtabs(tabs))\n \n for beam_width in beam_widths:\n output = beam_search(model, src, trg, TRG_vocab, beam_width)\n generated = get_text(list(output[1:, 0]), TRG_vocab)\n print('Generated (BeamSearch@{}):\\t{}'.format(beam_width, ' '.join(generated)).expandtabs(tabs))\n print()\n\n\ndef parse_tensorboard_logs(dir_path):\n rows = []\n event_paths = glob.glob(os.path.join(dir_path, \"event*\"))\n for event_path in event_paths:\n for e in tf.train.summary_iterator(event_path):\n for v in e.summary.value:\n row = {'epoch': e.step, 'metric': v.tag, 'value': v.simple_value}\n rows.append(row)\n return (pd.DataFrame.from_records(rows)\n .pivot(index='epoch', columns='metric', values='value')\n .sort_index())\n\n\ndef plot_metrics(logs, model_name, axes=None):\n if axes is None:\n fig, axes = plt.subplots(ncols=2, figsize=(15, 7))\n\n loss_columns = list(logs.filter(like='loss').columns)\n bleu_columns = list(logs.filter(like='BLEU').columns)\n\n for loss_column in loss_columns:\n axes[0].plot(logs.index, logs[loss_column], label=loss_column)\n axes[0].set_xlabel('Epoch', fontsize=16)\n axes[0].set_ylabel('Cross-entropy loss', fontsize=16)\n axes[0].set_title(f'Lossess for {model_name}', fontsize=16)\n axes[0].grid()\n axes[0].legend(fontsize=16)\n\n for bleu_column in bleu_columns:\n axes[1].plot(logs.index, logs[bleu_column], label=bleu_column)\n axes[1].set_xlabel('Epoch', fontsize=16)\n axes[1].set_ylabel('BLEU', fontsize=16)\n axes[1].set_title(f'BLEU for {model_name}', fontsize=16)\n axes[1].grid()\n axes[1].legend(fontsize=16)\n plt.tight_layout()\n\n\ndef _len_sort_key(x):\n return len(x.src)\n\n\ndef init_weights(m):\n for name, param in m.named_parameters():\n nn.init.uniform_(param, -0.08, 0.08)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n" ]
[ [ "torch.nn.init.uniform_", "matplotlib.pyplot.tight_layout", "torch.nn.functional.log_softmax", "torch.cat", "torch.full", "torch.zeros", "matplotlib.pyplot.subplots", "pandas.DataFrame.from_records", "torch.no_grad", "tensorflow.train.summary_iterator", "torch.topk" ] ]
mikelvallejo/covid-dashboard
[ "30d6db83d80bb524af373befb255f6c977edb3ab" ]
[ "src/pages/utils/load_time_series.py" ]
[ "import pandas as pd\nimport streamlit as st\n\n\[email protected]\ndef load_time_series():\n \"\"\"\n Function aggregates and returns a dictionary of time series data.\n :return: dict\n \"\"\"\n confirmed_data = pd.read_csv(\n \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\")\n death_data = pd.read_csv(\n \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\")\n recovered_data = pd.read_csv(\n \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv\")\n\n confirmed_data = confirmed_data.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'],\n var_name=\"Date\", value_name=\"Confirmed\")\n confirmed_data[\"Confirmed\"] = confirmed_data[\"Confirmed\"].fillna(0)\n confirmed_data.loc[:, \"Date\"] = confirmed_data[\"Date\"].apply(lambda s: pd.to_datetime(s).date())\n death_data = death_data.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'],\n var_name=\"Date\", value_name=\"Deaths\")\n death_data[\"Deaths\"] = death_data[\"Deaths\"].fillna(0)\n death_data.loc[:, \"Date\"] = death_data[\"Date\"].apply(lambda s: pd.to_datetime(s).date())\n recovered_data = recovered_data.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'],\n var_name=\"Date\", value_name=\"Recovered\")\n recovered_data[\"Recovered\"] = recovered_data[\"Recovered\"].fillna(0)\n recovered_data.loc[:, \"Date\"] = recovered_data[\"Date\"].apply(lambda s: pd.to_datetime(s).date())\n\n return {\n \"Confirmed\": confirmed_data,\n \"Deaths\": death_data,\n \"Recovered\": recovered_data\n }" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
ashwin2401/ga-learner-dsmp-repo
[ "57e8d055b385acf17b1390c0f36a68ee83ad9962" ]
[ "Customer-Segmentation/code.py" ]
[ "# --------------\n# import packages\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt \n\n\n\n# Load Offers\noffers = pd.read_excel(path,sheet_name=0)\n\n# Load Transactions\ntransactions = pd.read_excel(path,sheet_name=1)\ntransactions['n'] = 1\n# Merge dataframes\ndf = transactions.merge(offers)\ndf.head(5)\n# Look at the first 5 rows\n\n\n\n# --------------\n# Code starts here\n\n# create pivot table\nmatrix = df.pivot_table(index='Customer Last Name', columns='Offer #',values='n')\n\n# replace missing values with 0\nmatrix.fillna(0,inplace=True)\n\n# reindex pivot table\nmatrix.reset_index(inplace=True)\n\n# display first 5 rows\nmatrix.head(5)\n\n# Code ends here\n\n\n# --------------\n# import packages\nfrom sklearn.cluster import KMeans\n\n# Code starts here\n\n# initialize KMeans object\ncluster = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10,random_state=0)\n\n# create 'cluster' column\nmatrix['cluster'] =cluster.fit_predict(matrix[matrix.columns[1:]])\nmatrix.head()\n# Code ends here\n\n\n# --------------\n# import packages\nfrom sklearn.decomposition import PCA\n\n# Code starts here\n\n# initialize pca object with 2 components\npca = PCA(n_components=2, random_state=0)\n\n# create 'x' and 'y' columns donoting observation locations in decomposed form\nmatrix['x'] = pca.fit_transform(matrix[matrix.columns[1:]])[:,1]\nmatrix['y'] = pca.fit_transform(matrix[matrix.columns[1:]])[:,1]\n\n# dataframe to visualize clusters by customer names\nclusters = matrix.iloc[:,[0,33,34,35]]\n\n# visualize clusters\nclusters.plot.scatter(x='x', y='y', c='cluster', colormap='viridis')\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# merge 'clusters' and 'transactions'\ndata = pd.merge(clusters,transactions)\n# merge `data` and `offers`\ndata = pd.merge(offers,data)\n# initialzie empty dictionary\nchampagne = {}\n\n# iterate over every cluster\nfor i in range(0,5):\n new_df = data[data.cluster == i]\n counts = new_df['Varietal'].value_counts(ascending=False)\n if counts.index[0] == 'Champagne':\n champagne[i] = (counts[0])\n \ncluster_champagne = max(champagne,key=champagne.get)\nprint(cluster_champagne)\n # observation falls in that cluster\n\n # sort cluster according to type of 'Varietal'\n\n # check if 'Champagne' is ordered mostly\n\n # add it to 'champagne'\n\n\n# get cluster with maximum orders of 'Champagne' \ncluster_champagne = champagne\n\n# print out cluster number\nprint(cluster_champagne)\n\n\n\n# --------------\n# Code starts here\n\n# empty dictionary\ndiscount = {}\n\n# iterate over cluster numbers\nfor i in range(0,5):\n\n # dataframe for every cluster\n new_df = data[data.cluster == i]\n # average discount for cluster\n counts = new_df['Discount (%)'].values.sum()/len(new_df)\n # adding cluster number as key and average discount as value\n discount[i] = counts \n\n# cluster with maximum average discount\ncluster_discount = max(discount,key=discount.get)\nprint(cluster_discount)\n# Code ends here\n\n\n" ]
[ [ "pandas.merge", "pandas.read_excel", "sklearn.decomposition.PCA", "sklearn.cluster.KMeans" ] ]
makkenno/task6_python
[ "75db9075885b164b309bfc78a306d839e19ca9e2" ]
[ "ichibaitemranking.py" ]
[ "import requests\nimport pandas as pd\nimport pprint\nfrom pathlib import Path\n\nURL = 'https://app.rakuten.co.jp/services/api/IchibaItem/Ranking/20170628?'\nAPP_ID = '1063506377423532236'\n\ndef ranking(genre_id): \n params = {\n 'applicationId': APP_ID,\n 'format': 'json',\n 'genreId': genre_id,\n }\n\n res = requests.get(URL, params=params)\n res_json = res.json()\n # pprint.pprint(res_json, depth=2)\n\n data = []\n for item in res_json['Items']:\n print(f\"ランク:{item['Item']['rank']}\")\n print(f\"商品名:{item['Item']['itemName']}\")\n data.append([item['Item']['rank'],item['Item']['itemName']])\n\n df = pd.DataFrame(data, columns=['rank', 'item_name'])\n df.to_csv(Path(__file__).resolve().parent/Path('ranking.csv'), index=False)\n\n return res_json\n\nif __name__ == \"__main__\":\n genre_id = input('ジャンルIDを入力してください>>>')\n ranking(genre_id)\n" ]
[ [ "pandas.DataFrame" ] ]
cmpaulo/ProfilesArchiveInstagram
[ "fe1da302110d37039f38ee2cf09697e190d2709b" ]
[ "WebscrapingInstagram_v1.py" ]
[ "#!/usr/bin/env python3\n# coding: utf-8\n\n# # Web Scraping Instagram with Selenium\n\nfrom unicodedata import name\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport time\n\nclass BotInstagram():\n\n\n def __init__(self):\n \n option = webdriver.ChromeOptions()\n # option.add_argument('headless')\n self.driver = webdriver.Chrome('/home/claudio/Downloads/Chrome/chromedriver',options=option)\n \n\n def address(self, link):\n \n self.driver.get(link)\n \n\n def login_ig(self, user = '', passd = ''):\n \n self.username = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[name='username']\")))\n self.password = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[name='password']\")))\n\n #enter username and password\n # perfilarquivologia \n self.username.clear()\n self.username.send_keys(user)\n self.password.clear()\n self.password.send_keys(passd)\n\n button = WebDriverWait(self.driver, 2).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"button[type='submit']\"))).click()\n not_now = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), \"Agora não\")]'))).click()\n not_now2 = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), \"Agora não\")]'))).click()\n \n\n def search_words(self, keywords):\n\n #target the search input field\n self.searchbox = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//input[@placeholder='Pesquisar']\")))\n self.searchbox.clear()\n\n #search for the any\n self.searchbox.send_keys(keywords)\n time.sleep(2)\n divs = self.driver.find_elements(By.CLASS_NAME, 'fuqBx')\n\n return divs\n \n\n def get_status_and_follow_profiles(self, urls, follow = True):\n\n self.address(urls)\n time.sleep(3)\n \n try:\n posting = self.driver.find_element(By.XPATH,'/html/body/div[1]/section/main/div/header/section/ul/li[1]/div/span').text\n\n except:\n posting = \"0\" \n print(\"Public - Not found\")\n\n try:\n followers = self.driver.find_element(By.XPATH,'/html/body/div[1]/section/main/div/header/section/ul/li[2]/a/div/span').text\n\n except:\n followers = \"0\"\n print(\"Followers - Not found\")\n\n if follow:\n try:\n followbutton = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"react-root\"]/section/main/div/header/section/div[1]/div[1]/div/div/div/span/span[1]/button')))\n if followbutton.text == \"Seguir\" or followbutton.text == \"Follow\":\n followbutton.click()\n print(' ### ### Following up new profile. !!!')\n else:\n pass\n except:\n pass\n\n return posting,followers\n\n\n def quit(self):\n\n self.driver.quit()\n\n\n\nbot = BotInstagram()\n\nbot.address('http://www.instagram.com')\n\nwith open('login_out.txt') as userfile:\n line = userfile.readline() \n usuario, senha = line.strip().split(' ')\n\nbot.login_ig(user = usuario, passd = senha)\n\n\n# open keywords keywords.txt files and save list lista.txt. 
\nfind_profiles = []\n\nwith open('lista.txt','w') as filew, open('keywords.txt','r') as words:\n\n search_word = words.readlines()\n\n for iw in search_word:\n\n divs = bot.search_words(f'{iw.split()[0].strip()}')\n \n for div in divs:\n\n print(f\"_{iw.split()[0]}_\")\n elements = div.find_elements(By.TAG_NAME, 'a')\n\n \n for element in elements:\n\n link = element.get_attribute(\"href\")\n \n if link not in find_profiles:\n \n if \"tags\" not in link:\n find_profiles.append(link)\n \n filew.write(link)\n filew.write('\\n')\n else: \n continue\n else: \n continue\n\nwords.close()\n\nfilew.close()\n\n# filter data from lista.txt\nimport pandas as pd\nfrom datetime import date\n\n\ndata = pd.read_csv('lista.txt',names=['urls_pf'])\n\nndata = data.drop_duplicates(keep='last').sort_values('urls_pf')\n\n\noutdata = pd.DataFrame(columns=['NumeroSeguidores','NumeroPostagens','@Perfil','url'])\n\n# open each profile and scrap followers and number of publications.\ndef string_ptbr_tonumber(number_string):\n \n if type(number_string) == float:\n \n valor = int(number_string)\n\n else:\n\n if \",\" in number_string:\n valor = number_string.replace(\",\",\".\").replace('mil','')\n valor = int(float(valor)*1000)\n \n elif \".\" in number_string:\n valor = number_string.replace(\".\",\"\")\n valor = int(float(valor))\n \n elif \"mil\" in number_string:\n valor = number_string.replace('mil','')\n valor = int(float(valor)*1000)\n \n else:\n valor = int(number_string)\n\n\n return valor\n\n\nfor iw in ndata.values:\n iw=iw[0]\n\n notag = iw.strip().split('/')\n\n if len(notag) < 6:\n post,follow = bot.get_status_and_follow_profiles(iw.strip(), follow=True)\n \n profileig = iw.strip().split('/')[-2]\n\n urllink = iw.strip()\n \n nflw = string_ptbr_tonumber(follow)\n npost = string_ptbr_tonumber(post)\n\n outdata.loc[len(outdata),['NumeroSeguidores','NumeroPostagens','@Perfil','url']] = [nflw, npost, profileig , urllink]\n \n else:\n continue\n\n# close bot instagram.\nbot.quit()\n\n\n# start datetime tag file and text dd/mm/YY\ntoday = date.today()\nd1 = today.strftime(\"%d%m%Y\")\n\ndate_s = today.strftime(\"%d/%m/%Y\") \n\nfileout = f\"lista_{d1}\"\n\n\norder_follow_number = outdata.sort_values('NumeroSeguidores', ascending=False)\n# save to file the list. arquived file\n\n\norder_follow_number.index = pd.RangeIndex(start=1, stop=len(order_follow_number)+1, step=1)\n\norder_follow_number.to_csv(fileout+'.csv')\n\n# save markdown. backup list\norder_follow_number.to_markdown(fileout+'.md')\n\n\n# format list to visualize in markdown file on github. creat a final list (lista_atual.txt) form backup list (lista_DDMMYYYY.md)\n\nwith open('lista_atual.md','w') as list_out, open(fileout+'.md') as list_f:\n\n cab = f\" **Perfis sobre Arquivologia no Instagram** \\n\\n Lista dos perfis encontratos a partir da pesquisa com os termos 'arquivo', 'arquivologia' e 'arquivística'. \\n\\n Pesquisa realizada no dia {date_s}.\\n\\n\"\n list_out.write(cab)\n for il in list_f.readlines():\n list_out.write(f'{il}')\n rpe = \"\\n\\n [Informações sobre o projeto 'Perfis sobre Arquivologia no Instagram'](https://github.com/mmacpaulo/ProfilesArchiveInstagram)\"\n list_out.write(rpe)\n\n list_f.close()\n list_out.close()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
leefirefly/DL4Epi
[ "0f249579427c98881fc4145b6a820b91f3e39bed" ]
[ "models/VAR.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self, args, data):\n super(Model, self).__init__()\n self.use_cuda = args.cuda\n self.m = data.m\n self.w = args.window\n\n self.linear = nn.Linear(self.m * self.w, self.m);\n self.output = None;\n if (args.output_fun == 'sigmoid'):\n self.output = F.sigmoid;\n if (args.output_fun == 'tanh'):\n self.output = F.tanh;\n\n def forward(self, x):\n x = x.view(-1, self.m * self.w);\n x = self.linear(x);\n if (self.output != None):\n x = self.output(x);\n return x;\n\n\n\n" ]
[ [ "torch.nn.Linear" ] ]
TanayNarshana/rethinking-network-pruning
[ "85360333c909d539880ff59101c7b5f9609789f7" ]
[ "imagenet/regression-pruning/main_B.py" ]
[ "import argparse\nimport numpy as np\nimport os\nimport shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nfrom tqdm import tqdm\n\nfrom compute_flops import count_model_param_flops\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nimport models\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('--model',default='',type=str,help='Model names: resnet-2x')\nparser.add_argument('-j', '--workers', default=25, type=int, metavar='N',\n help='number of data loading workers (default: 25)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=1, type=int,\n help='number of distributed processes')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='gloo', type=str,\n help='distributed backend')\nparser.add_argument('--save', default='.', type=str, metavar='PATH',\n help='path to save prune model (default: current directory)')\n\nbest_prec1 = 0\n\ndef main():\n global args, best_prec1\n args = parser.parse_args()\n print(args)\n\n args.distributed = args.world_size > 1\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\n if args.distributed:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size)\n\n #################################################################################\n if args.model == 'resnet-2x':\n model = models.resnet_2x()\n model_ref = models.resnet50_official()\n\n if args.model == 'vgg-5x':\n model = models.vgg_5x()\n model_ref = models.vgg_official()\n\n flops_std = count_model_param_flops(model_ref, 224)\n flops_small = count_model_param_flops(model, 
224)\n ratio = flops_std / flops_small\n if ratio >= 2:\n args.epochs = 180\n step_size = 60\n else:\n args.epochs = int(90 * ratio)\n step_size = int(args.epochs / 3)\n #################################################################################\n\n if not args.distributed:\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n else:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda()\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion)\n return\n \n print(\"Training for {} epochs.\".format(args.epochs))\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, step_size)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch)\n\n # evaluate on validation set\n prec1 = validate(val_loader, model, criterion)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, args.save) \n\ndef train(train_loader, model, criterion, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(tqdm(train_loader)):\n # measure data loading time\n data_time.update(time.time() - end)\n\n target = target.cuda()\n\n # 
compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda()\n\n with torch.no_grad():\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\ndef save_checkpoint(state, is_best, filepath, name='checkpoint.pth.tar'):\n torch.save(state, os.path.join(filepath, name))\n if is_best:\n shutil.copyfile(os.path.join(filepath, name), os.path.join(filepath, 'model_best.pth.tar'))\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef adjust_learning_rate(optimizer, epoch, step_size=30):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // step_size))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.nn.DataParallel", "torch.nn.parallel.DistributedDataParallel" ] ]
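A small self-contained check of the accuracy helper defined in the image-classification training script above; the logits and targets below are made-up values, and the snippet assumes accuracy() from that script is in scope (for example pasted into the same session).

import torch

# 3 fake samples over 5 classes; the second one is deliberately misclassified at top-1.
logits = torch.tensor([[0.1, 0.9, 0.0, 0.0, 0.0],
                       [0.8, 0.1, 0.1, 0.0, 0.0],
                       [0.0, 0.0, 0.2, 0.7, 0.1]])
targets = torch.tensor([1, 2, 3])

prec1, prec5 = accuracy(logits, targets, topk=(1, 5))  # accuracy() as defined in the script above
print(prec1.item(), prec5.item())  # roughly 66.7 (top-1) and 100.0 (top-5, since there are only 5 classes)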
ayl/gpt-neox
[ "be5a1eaa394196b24a4cde5414d6afaed39570aa" ]
[ "megatron/logging.py" ]
[ "# Copyright (c) 2021, EleutherAI contributors\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport torch\nimport wandb\nfrom megatron import mpu, print_rank_0\nfrom megatron.utils import report_memory\n\n\nclass Tee:\n \"\"\"Duplicate output to both stdout/err and file\"\"\"\n\n def __init__(self, file, err=False):\n self.file = open(file, \"w\")\n self.err = err\n if not err:\n self.std = sys.stdout\n sys.stdout = self\n else:\n self.std = sys.stderr\n sys.stderr = self\n\n def __del__(self):\n if not self.err:\n sys.stdout = self.std\n else:\n sys.stderr = self.std\n self.file.close()\n\n def write(self, data):\n try:\n self.file.write(data)\n except OSError:\n pass\n try:\n self.std.write(data)\n except OSError:\n pass\n\n def flush(self):\n try:\n self.file.flush()\n except OSError:\n pass\n\n\ndef human_readable_flops(num):\n for unit in [\n \"\",\n \"KFLOPS\",\n \"MFLOPS\",\n \"GFLOPS\",\n \"TFLOPS\",\n \"PFLOPS\",\n \"EFLOPS\",\n \"ZFLOPS\",\n ]:\n if abs(num) < 1000.0:\n return \"%3.1f%s\" % (num, unit)\n num /= 1000.0\n return \"%.1f%s\" % (num, \"Yi\")\n\n\ndef get_flops(neox_args, model, iter_time_s):\n world_size = torch.distributed.get_world_size()\n ff = model.total_params * 6\n attn = neox_args.seq_length * neox_args.hidden_size * neox_args.num_layers * 60\n flops = (\n neox_args.train_batch_size\n * neox_args.seq_length\n * (ff + attn)\n / (iter_time_s * world_size)\n )\n return flops\n\n\ndef training_log(\n neox_args,\n timers,\n loss_dict,\n total_loss_dict,\n learning_rate,\n iteration,\n loss_scale,\n report_memory_flag,\n skipped_iter,\n model,\n optimizer,\n noise_scale_logger,\n):\n \"\"\"Log training information such as losses, timing, etc.\"\"\"\n\n # Update losses.\n skipped_iters_key = \"skipped iterations\"\n total_loss_dict[skipped_iters_key] = (\n total_loss_dict.get(skipped_iters_key, 0) + skipped_iter\n )\n got_nan_key = \"got nan\"\n\n got_nan = False\n for key in loss_dict:\n if not skipped_iter:\n total_loss_dict[key] = total_loss_dict.get(key, 0.0) + loss_dict[key]\n else:\n value = loss_dict[key].float().sum().item()\n is_nan = value == float(\"inf\") or value == -float(\"inf\") or value != value\n got_nan = got_nan or is_nan\n\n total_loss_dict[got_nan_key] = total_loss_dict.get(got_nan_key, 0) + int(got_nan)\n\n # Logging.\n timers_to_log = []\n\n def add_to_logging(name):\n if name in timers.timers:\n timers_to_log.append(name)\n\n if not neox_args.is_pipe_parallel:\n add_to_logging(\"forward\")\n add_to_logging(\"backward\")\n add_to_logging(\"backward-backward\")\n add_to_logging(\"backward-allreduce\")\n add_to_logging(\"backward-master-grad\")\n add_to_logging(\"backward-clip-grad\")\n add_to_logging(\"optimizer\")\n add_to_logging(\"batch generator\")\n\n # Log timer info to tensorboard and wandb\n normalizer = iteration % neox_args.log_interval\n if normalizer == 0:\n normalizer = neox_args.log_interval\n if 
torch.distributed.get_rank() == 0:\n timers.write(\n names=timers_to_log, iteration=iteration, normalizer=normalizer\n )\n else:\n # with pipeline parallel, the megatron timers are overridden by the deepspeed ones.\n # Try to grab timer values from model engine. Only recently added to deeperspeed, so check that the engine\n # has that attribute first\n if hasattr(model, \"timer_values\") and model.timer_values is not None:\n if (\n model.wall_clock_breakdown()\n and model.global_steps % model.steps_per_print() == 0\n ):\n timer_values = model.timer_values\n # deepspeed already logs to tensorboard / prints values, so just log to wandb\n if neox_args.use_wandb and torch.distributed.get_rank() == 0:\n for key in timer_values:\n tb_wandb_log(\n f\"timers/{key}\",\n timer_values[key],\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n\n # write losses, lr, etc. every step\n tb_wandb_log(\n \"train/learning_rate\",\n learning_rate,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n for key in loss_dict:\n tb_wandb_log(\n f'train/{key.replace(\" \", \"_\")}',\n loss_dict[key],\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n if neox_args.fp16:\n tb_wandb_log(\n f\"train/loss_scale\",\n loss_scale,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n\n # log gradient noise scale\n if neox_args.log_gradient_noise_scale:\n if noise_scale_logger.noise_scale is not None:\n tb_wandb_log(\n f\"train/noise_scale\",\n noise_scale_logger.noise_scale,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n\n # (optional) Log optimizer states to wandb / tb every step\n if neox_args.log_optimizer_states:\n for k, v in optimizer.state_dict()[\"optimizer_state_dict\"][\"state\"].items():\n for ki, vi in v.items(): # step, module\n if ki != \"step\":\n opt_state_norm = torch.norm(vi) if hasattr(vi, \"dim\") else vi\n tb_wandb_log(\n f\"optimizer_state_norms/{k}_{ki}\",\n opt_state_norm,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n\n # (optional) Log grad/param norms to wandb / tb every step\n if neox_args.log_grad_norm or neox_args.log_param_norm:\n if neox_args.log_grad_norm:\n model.store_gradients = True # start storing gradients\n for i, (name, param) in enumerate(model.module.named_parameters()):\n if neox_args.log_grad_norm:\n if (\n hasattr(model, \"stored_gradients\")\n and model.stored_gradients is not None\n ):\n grad = model.stored_gradients[i]\n if grad is not None:\n tb_wandb_log(\n f\"gradient_norms/{name}\",\n torch.norm(grad),\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n all_ranks=True,\n )\n if neox_args.log_param_norm:\n tb_wandb_log(\n f\"parameter_norms/{name}\",\n torch.norm(param),\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n all_ranks=True,\n )\n\n if iteration % neox_args.log_interval == 0:\n # log other stuff every neox_args.log_interval iters\n elapsed_time = timers(\"interval time\").elapsed()\n iteration_time = elapsed_time / neox_args.log_interval\n samples_per_sec = neox_args.train_batch_size / iteration_time\n log_string = \" samples/sec: {:.3f} |\".format(samples_per_sec)\n tb_wandb_log(\n \"runtime/samples_per_sec\",\n samples_per_sec,\n iteration,\n 
use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n tb_wandb_log(\n \"runtime/iteration_time\",\n iteration_time,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n log_string += \" iteration {:8d}/{:8d} |\".format(\n iteration, neox_args.train_iters\n )\n log_string += \" elapsed time per iteration (ms): {:.1f} |\".format(\n elapsed_time * 1000.0 / neox_args.log_interval\n )\n log_string += \" learning rate: {:.3E} |\".format(learning_rate)\n num_iterations = max(\n 1, neox_args.log_interval - total_loss_dict[skipped_iters_key]\n )\n\n # log tflop / gpu\n flops_per_s_per_gpu = get_flops(\n neox_args=neox_args, model=model, iter_time_s=iteration_time\n )\n log_string += (\n f\" approx flops per GPU: {human_readable_flops(flops_per_s_per_gpu)} |\"\n )\n tb_wandb_log(\n \"runtime/flops_per_sec_per_gpu\",\n flops_per_s_per_gpu,\n iteration,\n use_wandb=neox_args.use_wandb,\n tensorboard_writer=neox_args.tensorboard_writer,\n )\n\n for key in total_loss_dict:\n if key not in [skipped_iters_key, got_nan_key]:\n v = (\n total_loss_dict[key].item()\n if hasattr(total_loss_dict[key], \"item\")\n else total_loss_dict[key]\n )\n avg = v / float(num_iterations)\n log_string += \" {}: {:.6E} |\".format(key, avg)\n total_loss_dict[key] = 0.0\n if neox_args.precision == \"fp16\":\n log_string += \" loss scale: {:.1f} |\".format(loss_scale)\n log_string += \" number of skipped iterations: {:3d} |\".format(\n total_loss_dict[skipped_iters_key]\n )\n log_string += \" number of nan iterations: {:3d} |\".format(\n total_loss_dict[got_nan_key]\n )\n total_loss_dict[skipped_iters_key] = 0\n total_loss_dict[got_nan_key] = 0\n print_rank_0(log_string)\n if report_memory_flag:\n report_memory(\"after {} iterations\".format(iteration))\n report_memory_flag = False\n\n timers.log(timers_to_log, normalizer=neox_args.log_interval)\n\n return report_memory_flag\n\n\ndef tb_wandb_log(\n key, value, iteration_no, use_wandb, tensorboard_writer=None, all_ranks=False\n):\n # logs to both tb and wandb (if present) from the zeroth rank\n do_log = torch.distributed.get_rank() == 0 or all_ranks\n if do_log and value is not None:\n if tensorboard_writer:\n tensorboard_writer.add_scalar(key, value, iteration_no)\n if use_wandb:\n wandb.log({key: value}, step=iteration_no)\n" ]
[ [ "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.norm" ] ]
ewanowara/geolocalization
[ "6fcd26772cc297ee49889463ee42ad025544330a" ]
[ "setup/download_images.py" ]
[ "from argparse import ArgumentParser\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nimport time\nfrom multiprocessing import Pool\nfrom functools import partial\nimport re\nimport logging\nimport requests\n\nimport msgpack\nimport pandas as pd\nimport PIL\nfrom PIL import ImageFile\n\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass MsgPackWriter:\n def __init__(self, path, chunk_size=4096):\n self.path = Path(path).absolute()\n self.path.mkdir(parents=True, exist_ok=True)\n self.chunk_size = chunk_size\n\n shards_re = r\"shard_(\\d+).msg\"\n self.shards_index = [\n int(re.match(shards_re, x).group(1))\n for x in self.path.iterdir()\n if x.is_dir() and re.match(shards_re, x)\n ]\n self.shard_open = None\n\n def open_next(self):\n if len(self.shards_index) == 0:\n next_index = 0\n else:\n next_index = sorted(self.shards_index)[-1] + 1\n self.shards_index.append(next_index)\n\n if self.shard_open is not None and not self.shard_open.closed:\n self.shard_open.close()\n\n self.count = 0\n self.shard_open = open(self.path / f\"shard_{next_index}.msg\", \"wb\")\n\n def __enter__(self):\n self.open_next()\n return self\n\n def __exit__(self, type, value, tb):\n self.shard_open.close()\n\n def write(self, data):\n if self.count >= self.chunk_size:\n self.open_next()\n\n self.shard_open.write(msgpack.packb(data))\n self.count += 1\n\n\ndef _thumbnail(img: PIL.Image, size: int) -> PIL.Image:\n # resize an image maintaining the aspect ratio\n # the smaller edge of the image will be matched to 'size'\n w, h = img.size\n if (w <= size) or (h <= size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), PIL.Image.BILINEAR)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), PIL.Image.BILINEAR)\n\n\ndef flickr_download(x, size_suffix=\"z\", min_edge_size=None):\n\n # prevent downloading in full resolution using size_suffix\n # https://www.flickr.com/services/api/misc.urls.html\n\n image_id = x[\"image_id\"]\n url_original = x[\"url\"]\n if size_suffix != \"\":\n url = url_original\n # modify url to download image with specific size\n ext = Path(url).suffix\n url = f\"{url.split(ext)[0]}_{size_suffix}{ext}\"\n else:\n url = url_original\n\n r = requests.get(url)\n if r:\n try:\n image = PIL.Image.open(BytesIO(r.content))\n except PIL.UnidentifiedImageError as e:\n logger.error(f\"{image_id} : {url}: {e}\")\n return\n elif r.status_code == 129:\n time.sleep(60)\n logger.warning(\"To many requests, sleep for 60s...\")\n flickr_download(x, min_edge_size=min_edge_size, size_suffix=size_suffix)\n else:\n logger.error(f\"{image_id} : {url}: {r.status_code}\")\n return None\n\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n\n # resize if necessary\n image = _thumbnail(image, min_edge_size)\n # convert to jpeg\n fp = BytesIO()\n image.save(fp, \"JPEG\")\n\n raw_bytes = fp.getvalue()\n return {\"image\": raw_bytes, \"id\": image_id}\n\n\nclass ImageDataloader:\n def __init__(self, url_csv: Path, shuffle=False, nrows=None):\n\n logger.info(\"Read dataset\")\n self.df = pd.read_csv(\n url_csv, names=[\"image_id\", \"url\"], header=None, nrows=nrows\n )\n # remove rows without url\n self.df = self.df.dropna()\n if shuffle:\n logger.info(\"Shuffle images\")\n self.df = self.df.sample(frac=1, random_state=10)\n logger.info(f\"Number of URLs: {len(self.df.index)}\")\n\n def __len__(self):\n return len(self.df.index)\n\n def __iter__(self):\n for image_id, url in zip(self.df[\"image_id\"].values, self.df[\"url\"].values):\n 
yield {\"image_id\": image_id, \"url\": url}\n\n\ndef parse_args():\n args = ArgumentParser()\n args.add_argument(\n \"--threads\",\n type=int,\n default=24,\n help=\"Number of threads to download and process images\",\n )\n args.add_argument(\n \"--output\",\n type=Path,\n default=Path(\"resources/images/mp16\"),\n help=\"Output directory where images are stored\",\n )\n args.add_argument(\n \"--url_csv\",\n type=Path,\n default=Path(\"resources/mp16_urls.csv\"),\n help=\"CSV with Flickr image id and URL for downloading\",\n )\n args.add_argument(\n \"--size\",\n type=int,\n default=320,\n help=\"Rescale image to a minimum edge size of SIZE\",\n )\n args.add_argument(\n \"--size_suffix\",\n type=str,\n default=\"z\",\n help=\"Image size suffix according to the Flickr API; Empty string for original image\",\n )\n args.add_argument(\"--nrows\", type=int)\n args.add_argument(\n \"--shuffle\", action=\"store_true\", help=\"Shuffle list of URLs before downloading\"\n )\n return args.parse_args()\n\n\ndef main():\n\n image_loader = ImageDataloader(args.url_csv, nrows=args.nrows, shuffle=args.shuffle)\n\n counter_successful = 0\n with Pool(args.threads) as p:\n with MsgPackWriter(args.output) as f:\n start = time.time()\n for i, x in enumerate(\n p.imap(\n partial(\n flickr_download,\n size_suffix=args.size_suffix,\n min_edge_size=args.size,\n ),\n image_loader,\n )\n ):\n if x is None:\n continue\n\n f.write(x)\n counter_successful += 1\n\n if i % 1000 == 0:\n end = time.time()\n logger.info(f\"{i}: {1000 / (end - start):.2f} image/s\")\n start = end\n logger.info(\n f\"Sucesfully downloaded {counter_successful}/{len(image_loader)} images ({counter_successful / len(image_loader):.3f})\"\n )\n return 0\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n args.output.mkdir(parents=True, exist_ok=True)\n\n logger = logging.getLogger(\"ImageDownloader\")\n logger.setLevel(logging.INFO)\n fh = logging.FileHandler(str(args.output / \"writer.log\"))\n fh.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n sys.exit(main())\n" ]
[ [ "pandas.read_csv" ] ]
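A hedged usage sketch for the MsgPackWriter class in setup/download_images.py above; the output directory, ids and byte payloads are placeholders, and the script's dependencies (msgpack, pandas, Pillow, requests) are assumed to be installed so the module can be imported as download_images.

from download_images import MsgPackWriter  # hypothetical import; module name taken from the file path above

# chunk_size=2 forces a rollover, so the third record lands in a second shard_*.msg file.
with MsgPackWriter("resources/images/demo", chunk_size=2) as writer:  # placeholder output directory
    for i in range(3):
        writer.write({"id": i, "image": b"fake-image-bytes"})  # placeholder payload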
prasun2106/customer_churn_using_neural_networks
[ "e1435535a0265c9bfbc0ce95a496e3e97ecaa82b" ]
[ "customer_churn_nn.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# In[2]:\n\n\n# Step 1: Import and Preprocessing\n# Importing the dataset\ndataset = pd.read_csv('data/Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13]\ny = dataset.iloc[:, 13]\n\n\n# In[3]:\n\n\nX.head()\n\n\n# In[4]:\n\n\nX.nunique()\n\n\n# In[5]:\n\n\nX.columns\n\n\n# In[6]:\n\n\none_hot_columns = [col for col in X.columns if col not in ['CreditScore','Age','Balance', 'EstimatedSalary']]\n\n\n# In[7]:\n\n\nX = pd.get_dummies(X, columns = one_hot_columns)\n\n\n# In[8]:\n\n\n# Convert Ag in buckets\nX['Age'] = pd.cut(X['Age'], 10)\n\n\n# In[9]:\n\n\nX = pd.get_dummies(X,columns = ['Age'])\n\n\n# In[10]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX = pd.DataFrame(scaler.fit_transform(X),columns = X.columns)\n\n\n# In[11]:\n\n\n# Feature Scaling\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n\n# In[26]:\n\n\n# There are two ways of initializing a neiral network:\n# 1. by defining the sequence of layers\n# 2. by defining the graph\n\n# In this problem, we will intialize it by defining the seqquence of layers\n\n# Step 2: Import libraries\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Step 3: Initialize\nclassifier_initial = Sequential()\n\n# Step 4: Add layers\n\n# Input layer (designated by input_dim = 11) and first hidden layer\nclassifier_initial.add(Dense(units = 19 , kernel_initializer = 'uniform', activation = 'relu', input_dim = 37))\n# second hidden layer\nclassifier_initial.add(Dense(units = 19 , kernel_initializer = 'uniform', activation = 'relu'))\n# output layer\nclassifier_initial.add(Dense(units = 1 , kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Step 5: compile ann - apply stochastic gradient descent\nclassifier_initial.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Step 6: Fit the model:\nclassifier_initial.fit(X_train,y_train, batch_size = 10, epochs = 10)\n\n\n# In[27]:\n\n\n# shape of weights\nfor elements in classifier_initial.get_weights():\n print(elements.shape)\n\n\n# In[28]:\n\n\n# Step 7: Make Predictions\ny_pred = classifier_initial.predict(X_test)\n# y_pred is probabolity --> convert it into class prediction to get y_pred_2 \ny_pred_2 = pd.DataFrame(y_pred).apply(lambda row: 1 if row[0]>0.5 else 0, axis = 1)\n\n\n# In[29]:\n\n\n# Step 8: Accuracy\nimport sklearn.metrics as metrics\nprint(f'accuracy: {metrics.accuracy_score(y_test, y_pred_2)}')\nprint(f'f1_score: {metrics.f1_score(y_test, y_pred_2)}')\nprint(f'precision: {metrics.precision_score(y_test, y_pred_2)}')\nprint(f'recall: {metrics.recall_score(y_test, y_pred_2)}')\n\n\n# # Step 9: Evaluating the ANN:\n# Judging our models's performance on one accuracy and one test set is not the best way to evaluate the model. Changing the test set will change the accuracy of our model slightly. To curb this issue, we will use k-fold cross validation.\n# \n# !['cv'](images/cv.png)\n# \n# ### Cross validation steps:\n# 1. Train on k-1 folds\n# 2. Test on remaining one\n# 3. Take mean of all k accuracies\n# 4. Find standard deviation of all accuracies\n# 5. Based on accuracy and standard deviations, we can see which of the following cases our model satisfies:\n# \n# !['bias_variance'](images/bias_variance.JPG)\n# \n# ### Implementation steps:\n# \n# 1. 
cross_val_score is sklearn function\n# 2. create a keras wrapper for sklearn so that the keras classifier can be used in sklearn cross_val_score\n# 3. \n\n# In[16]:\n\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\n\n\n# In[17]:\n\n\n# defining the function to be passed to KerasClassifier to convert it to sklearn classifier\n# only define the nn architecture. training and testing will be done by cross_val_Score\ndef nn_architecture():\n #initialize\n classifier = Sequential()\n # add input and first hidden layer\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer= 'uniform' , input_dim = 37))\n # add second hidden layer\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer= 'uniform' ))\n # add output layer\n classifier.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer= 'uniform'))\n # compile\n classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\n\n\n# In[18]:\n\n\nclassifier_2 = KerasClassifier(build_fn= nn_architecture, batch_size = 10, nb_epoch = 100)\n\n\n# In[19]:\n\n\naccuracies = cross_val_score(estimator = classifier_2,X = X_train, y = y_train, cv = 10, n_jobs=-1)\n\n\n# In[20]:\n\n\navg_accuracy = accuracies.mean()\nstd_accuracy = accuracies.std()\n\nprint(f'accuracy = {avg_accuracy*100:.2f}% +/- {std_accuracy*100:.2f}%')\n\n\n# # Step 10: Improving Accuracy\n# 1. Dropout Regularisation\n# 2. Hyper parametric tuning\n\n# In[39]:\n\n\nfrom keras.layers import Dropout\n#nn architecture with dropouts\ndef nn_architecture_2():\n classifier = Sequential()\n # add layers with dropout regularization\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer = 'uniform', input_dim = 37))\n classifier.add(Dropout(rate = 0.1))\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer = 'uniform'))\n classifier.add(Dropout(rate = 0.1))\n classifier.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer = 'uniform'))\n\n # compile\n classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n return classifier\n# cross validation\nclassifier_wrapper = KerasClassifier(build_fn = nn_architecture_2, batch_size = 10, nb_epoch = 100)\naccuracies_dropout = cross_val_score(estimator=classifier_wrapper, X = X_train, y = y_train, cv = 10)\n\n\n# In[ ]:\n\n\n\n\n\n# In[40]:\n\n\n# print(f'training set initital accuracy: {metrics.accuracy_score(y_train, classifier_initial(X_train))}')\nprint(f'test set initital accuracy: {metrics.accuracy_score(y_test, y_pred_2)}')\nprint(f'cross validation accuracy: {avg_accuracy}')\nprint(f'training set accuracy after dropout regularisation: {accuracies_dropout.mean()}')\n\n\n# In[41]:\n\n\naccuracies.mean()\n\n\n# ### Hyperparametric tuning\n\n# In[55]:\n\n\ndef nn_architecture_3(optimizer):\n classifier = Sequential()\n # add layers with dropout regularization\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer = 'uniform', input_dim = 37))\n classifier.add(Dropout(rate = 0.1))\n classifier.add(Dense(units = 19, activation = 'relu', kernel_initializer = 'uniform'))\n classifier.add(Dropout(rate = 0.1))\n classifier.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer = 'uniform'))\n\n # compile\n classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n return classifier\n\n\n# In[59]:\n\n\nfrom sklearn.model_selection import 
GridSearchCV\nclassifier = KerasClassifier(build_fn=nn_architecture_3) #dont add batch_size and nb_epochs as we will find them using grid_search\nparameters = {'batch_size': [25,32],\n 'nb_epoch':[100,500],\n 'optimizer':['adam','rmsprop']}\n\n\n# In[60]:\n\n\ngrid_search = GridSearchCV(estimator= classifier, param_grid= parameters, \n scoring = 'accuracy',\n cv = 10)\n\n\n# In[61]:\n\n\ngrid_search = grid_search.fit(X_train, y_train)\n\n\n# In[63]:\n\n\nbest_param = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\n\n\n# In[64]:\n\n\nbest_param\n\n\n# In[65]:\n\n\nbest_accuracy\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.model_selection.cross_val_score", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.cut", "sklearn.metrics.f1_score", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.recall_score", "pandas.get_dummies" ] ]
willismax/machine-learning-engineering-for-production-public
[ "3602b1bea0744b97658cbbc2d61072f7dcef33d7" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching!!\")\n\n# Open classifier in global scope\nwith open(\"models/wine.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n" ]
[ [ "numpy.array" ] ]
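A hedged client-side sketch for the /predict endpoint of the wine-classification app above; the host and port are hypothetical (e.g. the app started locally with uvicorn on port 8000), and the 13 feature values per sample are placeholders rather than real wine measurements.

import requests

# The Wine schema above expects "batches": a list of 13-float feature vectors.
payload = {"batches": [[1.0] * 13, [2.0] * 13]}  # placeholder feature values

resp = requests.post("http://127.0.0.1:8000/predict", json=payload)
print(resp.status_code, resp.json())  # response body has the shape {"Prediction": [...]}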
shujaatkhan/HARK
[ "8dfaa7e03789bd380d7d314f760949c6daf4041c" ]
[ "HARKsimulation.py" ]
[ "'''\nFunctions for generating simulated data and shocks.\n'''\n\nfrom __future__ import division\nimport warnings # A library for runtime warnings\nimport numpy as np # Numerical Python\n\ndef drawMeanOneLognormal(N, sigma=1.0, seed=0):\n '''\n Generate arrays of mean one lognormal draws. The sigma input can be a number\n or list-like. If a number, output is a length N array of draws from the\n lognormal distribution with standard deviation sigma. If a list, output is\n a length T list whose t-th entry is a length N array of draws from the\n lognormal with standard deviation sigma[t].\n\n Parameters\n ----------\n N : int\n Number of draws in each row.\n sigma : float or [float]\n One or more standard deviations. Number of elements T in sigma\n determines number of rows of output.\n seed : int\n Seed for random number generator.\n\n Returns:\n ------------\n draws : np.array or [np.array]\n T-length list of arrays of mean one lognormal draws each of size N, or\n a single array of size N (if sigma is a scalar).\n '''\n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if type(sigma) == float: # Return a single array of length N\n mu = -0.5*sigma**2\n draws = RNG.lognormal(mean=mu, sigma=sigma, size=N)\n else: # Set up empty list to populate, then loop and populate list with draws\n draws=[]\n for sig in sigma: \n mu = -0.5*(sig**2)\n draws.append(RNG.lognormal(mean=mu, sigma=sig, size=N)) \n return draws\n \ndef drawLognormal(N,mu=0.0,sigma=1.0,seed=0):\n '''\n Generate arrays of mean one lognormal draws. The sigma input can be a number\n or list-like. If a number, output is a length N array of draws from the\n lognormal distribution with standard deviation sigma. If a list, output is\n a length T list whose t-th entry is a length N array of draws from the\n lognormal with standard deviation sigma[t].\n\n Parameters\n ----------\n N : int\n Number of draws in each row.\n sigma : float or [float]\n One or more standard deviations. Number of elements T in sigma\n determines number of rows of output.\n seed : int\n Seed for random number generator.\n\n Returns:\n ------------\n draws : np.array or [np.array]\n T-length list of arrays of mean one lognormal draws each of size N, or\n a single array of size N (if sigma is a scalar).\n '''\n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if type(sigma) == float: # Return a single array of length N\n draws = RNG.lognormal(mean=mu, sigma=sigma, size=N)\n else: # Set up empty list to populate, then loop and populate list with draws\n draws=[]\n for j in range(len(sigma)): \n draws.append(RNG.lognormal(mean=mu[j], sigma=sigma[j], size=N)) \n return draws\n \n \ndef drawNormal(N, mu=0.0, sigma=1.0, seed=0):\n '''\n Generate arrays of normal draws. The mu and sigma inputs can be numbers or\n list-likes. If a number, output is a length N array of draws from the normal\n distribution with mean mu and standard deviation sigma. If a list, output is\n a length T list whose t-th entry is a length N array with draws from the\n normal distribution with mean mu[t] and standard deviation sigma[t].\n\n Parameters\n ----------\n N : int\n Number of draws in each row.\n mu : float or [float]\n One or more means. Number of elements T in mu determines number of rows\n of output.\n sigma : float or [float]\n One or more standard deviations. 
Number of elements T in sigma\n determines number of rows of output.\n seed : int\n Seed for random number generator.\n\n Returns\n -------\n draws : np.array or [np.array]\n T-length list of arrays of normal draws each of size N, or a single array\n of size N (if sigma is a scalar).\n '''\n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if type(sigma) == float: # Return a single array of length N\n draws = sigma*RNG.randn(N) + mu\n else: # Set up empty list to populate, then loop and populate list with draws\n draws=[]\n for t in range(len(sigma)):\n draws.append(sigma[t]*RNG.randn(N) + mu[t]) \n return draws\n \ndef drawWeibull(N, scale=1.0, shape=1.0, seed=0):\n '''\n Generate arrays of Weibull draws. The scale and shape inputs can be \n numbers or list-likes. If a number, output is a length N array of draws from\n the Weibull distribution with the given scale and shape. If a list, output\n is a length T list whose t-th entry is a length N array with draws from the\n Weibull distribution with scale scale[t] and shape shape[t].\n \n Note: When shape=1, the Weibull distribution is simply the exponential dist.\n \n Mean: scale*Gamma(1 + 1/shape)\n\n Parameters\n ----------\n N : int\n Number of draws in each row.\n scale : float or [float]\n One or more scales. Number of elements T in scale determines number of\n rows of output.\n shape : float or [float]\n One or more shape parameters. Number of elements T in scale\n determines number of rows of output.\n seed : int\n Seed for random number generator.\n\n Returns:\n ------------\n draws : np.array or [np.array]\n T-length list of arrays of Weibull draws each of size N, or a single\n array of size N (if sigma is a scalar).\n '''\n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if scale == 1:\n scale = float(scale)\n if type(scale) == float: # Return a single array of length N\n draws = scale*(-np.log(1.0-RNG.rand(N)))**(1.0/shape)\n else: # Set up empty list to populate, then loop and populate list with draws\n draws=[]\n for t in range(len(scale)):\n draws.append(scale[t]*(-np.log(1.0-RNG.rand(N)))**(1.0/shape[t])) \n return draws \n \ndef drawUniform(N, bot=0.0, top=1.0, seed=0):\n '''\n Generate arrays of uniform draws. The bot and top inputs can be numbers or\n list-likes. If a number, output is a length N array of draws from the\n uniform distribution on [bot,top]. If a list, output is a length T list\n whose t-th entry is a length N array with draws from the uniform distribution\n on [bot[t],top[t]].\n\n Parameters\n ----------\n N : int\n Number of draws in each row.\n bot : float or [float]\n One or more bottom values. Number of elements T in mu determines number\n of rows of output.\n top : float or [float]\n One or more top values. 
Number of elements T in top determines number of\n rows of output.\n seed : int\n Seed for random number generator.\n\n Returns\n -------\n draws : np.array or [np.array]\n T-length list of arrays of uniform draws each of size N, or a single\n array of size N (if sigma is a scalar).\n '''\n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if type(bot) == float or type(bot) == int: # Return a single array of size N\n draws = bot + (top - bot)*RNG.rand(N)\n else: # Set up empty list to populate, then loop and populate list with draws\n draws=[]\n for t in range(len(bot)):\n draws.append(bot[t] + (top[t] - bot[t])*RNG.rand(N)) \n return draws\n \ndef drawBernoulli(N,p=0.5,seed=0):\n '''\n Generates arrays of booleans drawn from a simple Bernoulli distribution.\n The input p can be a float or a list-like of floats; its length T determines\n the number of entries in the output. The t-th entry of the output is an\n array of N booleans which are True with probability p[t] and False otherwise.\n \n Arguments\n ---------\n N : int\n Number of draws in each row.\n p : float or [float]\n Probability or probabilities of the event occurring (True). \n seed : int\n Seed for random number generator.\n\n Returns\n -------\n draws : np.array or [np.array]\n T-length list of arrays of Bernoulli draws each of size N, or a single\n array of size N (if sigma is a scalar).\n ''' \n # Set up the RNG\n RNG = np.random.RandomState(seed)\n\n if type(p) == float:# Return a single array of size N\n draws = RNG.uniform(size=N) < p\n else: # Set up empty list to populate, then loop and populate list with draws:\n draws=[]\n for t in range(len(p)):\n draws.append(RNG.uniform(size=N) < p[t])\n return draws\n \ndef drawDiscrete(N,P=[1.0],X=[0.0],exact_match=False,seed=0):\n '''\n Simulates N draws from a discrete distribution with probabilities P and outcomes X.\n \n Parameters\n ----------\n P : np.array\n A list of probabilities of outcomes.\n X : np.array\n A list of discrete outcomes.\n N : int\n Number of draws to simulate.\n exact_match : boolean\n Whether the draws should \"exactly\" match the discrete distribution (as\n closely as possible given finite draws). When True, returned draws are\n a random permutation of the N-length list that best fits the discrete\n distribution. 
When False (default), each draw is independent from the\n others and the result could deviate from the input.\n seed : int\n Seed for random number generator.\n \n Returns\n -------\n draws : np.array\n An array draws from the discrete distribution; each element is a value in X.\n ''' \n # Set up the RNG\n RNG = np.random.RandomState(seed)\n \n if exact_match:\n events = np.arange(P.size) # just a list of integers\n cutoffs = np.round(np.cumsum(P)*N) # cutoff points between discrete outcomes\n top = 0\n # Make a list of event indices that closely matches the discrete distribution\n event_list = []\n for j in range(events.size):\n bot = top\n top = cutoffs[j]\n event_list += (top-bot)*[events[j]]\n # Randomly permute the event indices and store the corresponding results\n event_draws = RNG.permutation(event_list)\n draws = X[event_draws]\n else:\n # Generate a cumulative distribution\n base_draws = RNG.uniform(size=N)\n cum_dist = np.cumsum(P)\n \n # Convert the basic uniform draws into discrete draws\n indices = cum_dist.searchsorted(base_draws)\n draws = np.asarray(X)[indices]\n return draws\n \n \nif __name__ == '__main__': \n print(\"Sorry, HARKsimulation doesn't actually do anything on its own.\")\n print(\"To see some examples of its functions in action, look at any\")\n print(\"of the model modules in /ConsumptionSavingModel. In the future, running\")\n print(\"this module will show examples of each function in the module.\")\n " ]
[ [ "numpy.asarray", "numpy.arange", "numpy.random.RandomState", "numpy.cumsum" ] ]
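A brief usage sketch for the draw functions in HARKsimulation.py above; the parameter values are arbitrary illustrations, and the module is assumed to be importable from the repository root.

import numpy as np
from HARKsimulation import drawMeanOneLognormal, drawNormal, drawDiscrete

lognormal_draws = drawMeanOneLognormal(N=10000, sigma=0.2, seed=31)          # scalar sigma -> single array
normal_draws = drawNormal(N=10000, mu=[0.0, 1.0], sigma=[1.0, 2.0], seed=7)  # list inputs -> list of arrays
discrete_draws = drawDiscrete(N=10000, P=np.array([0.3, 0.7]),
                              X=np.array([-1.0, 1.0]), seed=3)               # draws values from X with probs P

print(np.mean(lognormal_draws))  # close to 1 by construction of the mean-one lognormal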
ssklykov/collection_numCalc
[ "f6c69aa582fc811b998a0989b99157b8566c884f" ]
[ "Regression/SampleValues.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass for modelling sample values (measurements with some deviations)\nDeveloped in Spyder IDE\n@author: ssklykov\n\"\"\"\n# %% \"Dependencies\" - imports\nimport numpy as np\nimport math\n\n\n# %% Class itself\nclass GenerateSample():\n    \"\"\"Class for generating sample values\"\"\"\n    xMin = 0; xMax = 1; nSamples = 5; percentError = 10; a = 1; b = 0; nDigits = 3\n\n    def __init__(self, a: float, b: float, xMin: float, xMax: float, nSamples: int, percentError: int, nDigits: int):\n        self.xMin = xMin; self.xMax = xMax; self.a = a; self.b = b; self.nDigits = nDigits\n        self.nSamples = nSamples; self.percentError = percentError\n\n    def generateSamplePoint(self, x: float) -> tuple:\n        \"\"\" Mimic the measurements with errors \"\"\"\n        n = 10  # modelling how many points \"have been measured\" for calculating mean values and standard deviations\n        y = np.zeros(n, dtype=float); sum = 0.0\n        for i in range(n):\n            # modelling a measurement with an error\n            rand1 = 1+0.01*(np.random.randint(-self.percentError, self.percentError+1))  # generation of rand coeff 1\n            rand2 = 1+0.01*(np.random.randint(-self.percentError, self.percentError+1))  # rand coeff 2\n            y[i] = self.a*rand1*x + self.b*rand2\n            sum += y[i]\n        # Calculation of the returned values\n        yMean = sum / n  # Mean value\n        sum = 0.0  # reset to zero before accumulating squared deviations\n        for i in range(n):\n            sum += pow((y[i]-yMean), 2)  # Calculation of (y[i] - yMean)**2\n        yStD = math.sqrt(sum/(n-1))  # Estimate of the standard deviation\n        return (yMean, yStD)\n\n    def generateSampleValues(self) -> tuple:\n        \"\"\" Making the samples \"\"\"\n        x = np.linspace(self.xMin, self.xMax, self.nSamples)\n        for i in range(self.nSamples):\n            x[i] = round(x[i], self.nDigits)\n        yMean = np.zeros(self.nSamples, dtype=float)\n        yStD = np.zeros(self.nSamples, dtype=float)\n        # Generate sample values\n        for i in range(self.nSamples):\n            (yMean[i], yStD[i]) = self.generateSamplePoint(x[i])\n            yMean[i] = round(yMean[i], self.nDigits)\n            yStD[i] = round(yStD[i], self.nDigits)\n        return (x, yMean, yStD)\n" ]
[ [ "numpy.zeros", "numpy.linspace", "numpy.random.randint" ] ]
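A short usage sketch for the GenerateSample class above; the line parameters and sample counts are arbitrary illustrations, and Regression/ is assumed to be on the Python path so the module imports as SampleValues.

from SampleValues import GenerateSample  # module name taken from the file path above

# Simulate noisy measurements of y = 2x + 1 on [0, 10] with +/-10% random error.
sampler = GenerateSample(a=2.0, b=1.0, xMin=0.0, xMax=10.0, nSamples=8, percentError=10, nDigits=3)
x, y_mean, y_std = sampler.generateSampleValues()
print(x, y_mean, y_std)  # abscissas, mean "measured" values, and their estimated standard deviations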
markovalexander/DDPM
[ "8fa813ac0c27afb1a8133b6d57ee48223629b684" ]
[ "lib/samplers.py" ]
[ "from abc import ABC, abstractmethod\n\nimport torch\nimport numpy as np\n\n\nclass AbstractSampler(ABC):\n    @abstractmethod\n    def weights(self):\n        ...\n\n    def sample(self, batch_size, device):\n        weights = self.weights()\n        probs = weights / np.sum(weights)\n        indxes = np.random.choice(len(probs), size=(batch_size,), p=probs)\n        time = torch.from_numpy(indxes).to(device=device, dtype=torch.int64)\n        weights = 1 / (len(probs) * probs[indxes])\n        weights = torch.from_numpy(weights).to(device=device, dtype=torch.float32)\n        return time, weights\n\n    def update_with_all_losses(self, ts, losses):\n        pass\n\n\nclass UniformSampler(AbstractSampler):\n    def __init__(self, diffusion):\n        self.diffusion = diffusion\n        self._weights = np.ones([diffusion.num_timesteps])\n\n    def weights(self):\n        return self._weights\n\n\nclass LossSecondMomentResampler(AbstractSampler):\n    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):\n        self.diffusion = diffusion\n        self.history_per_term = history_per_term\n        self.uniform_prob = uniform_prob\n        self._loss_history = np.zeros(\n            [diffusion.num_timesteps, history_per_term], dtype=np.float64\n        )\n        # np.int was removed in NumPy 1.24; use an explicit integer dtype\n        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)\n\n    def weights(self):\n        if not self._warmed_up():\n            # float64 keeps the uniform probabilities summing to 1 for np.random.choice\n            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)\n\n        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))\n        weights /= np.sum(weights)\n        weights *= 1 - self.uniform_prob\n        weights += self.uniform_prob / len(weights)\n        return weights\n\n    def update_with_all_losses(self, ts, losses):\n        for t, loss in zip(ts, losses):\n            if self._loss_counts[t] == self.history_per_term:\n                self._loss_history[t, :-1] = self._loss_history[t, 1:]\n                self._loss_history[t, -1] = loss\n            else:\n                self._loss_history[t, self._loss_counts[t]] = loss\n                self._loss_counts[t] += 1\n\n    def _warmed_up(self):\n        return (self._loss_counts == self.history_per_term).all()\n\n\ndef get_time_sampler(sampler_type):\n    if sampler_type.upper() == \"LOSS-SECOND-MOMENT\":\n        return LossSecondMomentResampler\n    else:\n        return UniformSampler\n" ]
[ [ "torch.from_numpy", "numpy.ones", "numpy.mean", "numpy.zeros", "numpy.sum" ] ]
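A hedged sketch of how the samplers above might be driven from a training loop; the diffusion stand-in only mimics the num_timesteps attribute the samplers rely on, and the per-sample losses are random placeholders.

import torch
from types import SimpleNamespace
from lib.samplers import get_time_sampler  # import path taken from the file path above

diffusion = SimpleNamespace(num_timesteps=1000)      # stand-in; a real diffusion object is assumed
sampler = get_time_sampler("uniform")(diffusion)     # anything but "loss-second-moment" gives UniformSampler

t, weights = sampler.sample(batch_size=16, device="cpu")  # timesteps and importance weights
losses = torch.rand(16)                                   # placeholder per-sample losses
weighted_loss = (weights * losses).mean()

# No-op for UniformSampler; LossSecondMomentResampler uses this to reweight future draws.
sampler.update_with_all_losses(t.tolist(), losses.tolist())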
skn123/statismo-1
[ "a380f33cf070d1c4ba624db8b0c6d946d2aecabf" ]
[ "modules/VTK/wrapping/tests/statismoTests/test_builders.py" ]
[ "#\n# This file is part of the statismo library.\n#\n# Author: Marcel Luethi ([email protected])\n#\n# Copyright (c) 2011 University of Basel\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# Neither the name of the project's author nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n#\nimport vtk\nimport statismovtk as statismo\nfrom statutils import get_polydata_dir, get_surrogates_data_dir, get_data_files, \\\n read_vtkpd, get_point_from_id, build_pd_manager, get_structured_points_dir, \\\n build_sp_manager, get_coords_from_id\n\nimport unittest\nimport tempfile\nfrom os import listdir\nfrom os.path import join\nfrom scipy import randn, log, any, identity\nfrom numpy import isnan, zeros\nfrom numpy.lib.scimath import sqrt\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.pd_files = get_data_files(get_polydata_dir())\n self.pd_representer, self.pd_manager = build_pd_manager(\n get_polydata_dir())\n\n self.sp_files = get_data_files(get_structured_points_dir())\n self.sp_representer, self.sp_manager = build_sp_manager(\n get_structured_points_dir())\n\n def tearDown(self):\n pass\n\n def assertPointsAlmostEquals(self, pts1, pts2, numPoints, noise):\n for i in range(0, pts1.GetNumberOfPoints(), pts1.GetNumberOfPoints() // numPoints):\n self.assertTrue(\n abs(pts1.GetPoint(i)[0] - pts2.GetPoint(i)[0]) <= max(sqrt(noise), 1e-2))\n self.assertTrue(\n abs(pts1.GetPoint(i)[1] - pts2.GetPoint(i)[1]) <= max(sqrt(noise), 1e-2))\n self.assertTrue(\n abs(pts1.GetPoint(i)[2] - pts2.GetPoint(i)[2]) <= max(sqrt(noise), 1e-2))\n\n def build_and_test_model(self, model, manager, files, noise):\n self.assertTrue(model.GetNumberOfPrincipalComponents() <= len(files))\n\n # we cannot have negative eigenvalues\n self.assertTrue((model.GetPCAVarianceVector() >= 0).all() == True)\n self.assertTrue(isnan(model.GetPCAVarianceVector()).any() == False)\n\n # we project a dataset into the model and try to restore it.\n samples = manager.GetData()\n\n sample = samples[0].GetSample()\n coeffs = model.ComputeCoefficients(sample)\n restored_sample = model.DrawSample(coeffs)\n\n self.assertEqual(sample.GetNumberOfPoints(),\n 
restored_sample.GetNumberOfPoints())\n self.assertPointsAlmostEquals(sample, restored_sample, 100, noise)\n\n # check if the scores can be used to restore the data in the datamanager\n scores = model.GetModelInfo().GetScoresMatrix()\n for i in range(0, scores.shape[1]):\n sample_from_scores = model.DrawSample(scores[:, i])\n sample_from_dm = samples[i].GetSample()\n\n self.assertPointsAlmostEquals(\n sample_from_scores, sample_from_dm, 100, noise)\n\n return model\n\n def build_and_test_model_pd(self, noise):\n builder = statismo.PCAModelBuilder_vtkPD.Create()\n model = builder.BuildNewModel(self.pd_manager.GetData(), noise)\n return self.build_and_test_model(model, self.pd_manager, self.pd_files, noise)\n\n def build_and_test_model_sp(self, noise):\n builder = statismo.PCAModelBuilder_vtkSP.Create()\n model = builder.BuildNewModel(self.sp_manager.GetData(), noise)\n return self.build_and_test_model(model, self.sp_manager, self.sp_files, noise)\n\n def check_pca_model_no_score(self, model, files):\n self.assertTrue(model.GetNumberOfPrincipalComponents() <= len(files))\n\n # we cannot have negative eigenvalues\n self.assertTrue((model.GetPCAVarianceVector() >= 0).all() == True)\n\n scores = model.GetModelInfo().GetScoresMatrix()\n self.assertTrue(scores.shape[0] == 0 and scores.shape[1] == 0)\n\n def test_pcamodel_builder_noscore_pd(self):\n # check if a model can be build when there are no scores\n builder = statismo.PCAModelBuilder_vtkPD.Create()\n model = builder.BuildNewModel(self.pd_manager.GetData(), 0, False)\n self.check_pca_model_no_score(model, self.pd_files)\n\n def test_pcamodel_builder_noscore_sp(self):\n # check if a model can be build when there are no scores\n builder = statismo.PCAModelBuilder_vtkSP.Create()\n model = builder.BuildNewModel(self.sp_manager.GetData(), 0, False)\n self.check_pca_model_no_score(model, self.sp_files)\n\n def test_pcamodel_builder_nonoise_pd(self):\n model = self.build_and_test_model_pd(0)\n self.assertAlmostEqual(model.GetNoiseVariance(), 0)\n\n def test_pcamodel_builder_nonoise_sp(self):\n model = self.build_and_test_model_sp(0)\n self.assertAlmostEqual(model.GetNoiseVariance(), 0)\n\n def test_pcamodel_builder_noise_pd(self):\n model = self.build_and_test_model_pd(0.1)\n self.assertAlmostEqual(model.GetNoiseVariance(), 0.1)\n\n def test_pcamodel_builder_noise_sp(self):\n model = self.build_and_test_model_sp(0.1)\n self.assertAlmostEqual(model.GetNoiseVariance(), 0.1)\n\n def test_pcamodel_builder_largenoise_pd(self):\n model = self.build_and_test_model_pd(1000)\n self.assertAlmostEqual(model.GetNoiseVariance(), 1000)\n\n def test_pcamodel_builder_largenoise_sp(self):\n model = self.build_and_test_model_sp(1000)\n self.assertAlmostEqual(model.GetNoiseVariance(), 1000)\n\n def test_posterior_builder_check_mean_pd(self):\n # if we fix many points to correspond to one of the samples, and build a\n # partially fixed model, its mean should correspond to the sample\n fixed_pt_count = 100\n test_pt_count = 1000\n\n sample = self.pd_manager.GetData()[0].GetSample()\n pv_list = statismo.PointPointValueList()\n\n domain_points = self.pd_representer.GetDomain().GetDomainPoints()\n\n for pid in range(0, len(domain_points), len(domain_points) // fixed_pt_count):\n fixed_pt = domain_points[pid]\n value = statismo.vtkPoint(*get_point_from_id(sample, pid))\n pv_list.append(statismo.PointPointValuePair(fixed_pt, value))\n\n post_builder = statismo.PosteriorModelBuilder_vtkPD.Create()\n post_model = post_builder.BuildNewModel(\n self.pd_manager.GetData(), 
pv_list, 0.1, 0.1)\n\n partial_mean = post_model.DrawMean()\n\n # now the sample that we used to fix the point should be similar to the mean. We test it by\n for pid in range(0, sample.GetNumberOfPoints(), sample.GetNumberOfPoints() // test_pt_count):\n mean_pt = get_point_from_id(partial_mean, pid)\n sample_pt = get_point_from_id(sample, pid)\n self.assertAlmostEqual(mean_pt[0], sample_pt[0], 0)\n self.assertAlmostEqual(mean_pt[1], sample_pt[1], 0)\n self.assertAlmostEqual(mean_pt[2], sample_pt[2], 0)\n\n def test_posterior_builder_noconstraint_pd(self):\n # if we fix no point, it should be the same as building a normal pca model\n pv_list = statismo.PointPointValueList()\n\n post_builder = statismo.PosteriorModelBuilder_vtkPD.Create()\n post_model = post_builder.BuildNewModel(\n self.pd_manager.GetData(), pv_list, 0.1, 0.1)\n\n pcabuilder = statismo.PCAModelBuilder_vtkPD.Create()\n pca_model = pcabuilder.BuildNewModel(self.pd_manager.GetData(), 0.1)\n\n sample = self.pd_manager.GetData()[0].GetSample()\n coeffs_post_model = post_model.ComputeCoefficients(sample)\n coeffs_pca_model = pca_model.ComputeCoefficients(sample)\n for i in range(0, len(coeffs_post_model)):\n # the sign is allowed to change\n self.assertAlmostEqual(\n abs(coeffs_post_model[i]), abs(coeffs_pca_model[i]), 1)\n\n def test_posterior_builder_variance_pd(self):\n # checks whether with every added point, the variance is decreasing\n reference = self.pd_representer.GetReference()\n sample = self.pd_manager.GetData()[0].GetSample()\n points_count = sample.GetNumberOfPoints()\n pv_list = statismo.PointPointValueList()\n\n post_builder = statismo.PosteriorModelBuilder_vtkPD.Create()\n post_model = post_builder.BuildNewModel(\n self.pd_manager.GetData(), pv_list, 0.1, 0.1)\n total_var = post_model.GetPCAVarianceVector().sum()\n for pid in range(0, points_count, points_count // 10):\n ref_pt = statismo.vtkPoint(*get_point_from_id(reference, pid))\n pt = statismo.vtkPoint(*get_point_from_id(sample, pid))\n pv_list.append(statismo.PointPointValuePair(ref_pt, pt))\n\n post_builder = statismo.PosteriorModelBuilder_vtkPD.Create()\n post_model = post_builder.BuildNewModel(\n self.pd_manager.GetData(), pv_list, 0.1, 0.1)\n total_sdev_prev = total_var\n total_var = post_model.GetPCAVarianceVector().sum()\n self.assertTrue(total_var < total_sdev_prev)\n\n def test_posterior_builder_constraint_pd(self):\n # Checks if a point that is fixed really stays where it was constrained to stay\n reference = self.pd_representer.GetReference()\n sample = self.pd_manager.GetData()[0].GetSample()\n pv_list = statismo.PointPointValueList()\n\n ref_pt = get_point_from_id(reference, 0)\n fixed_pt = get_point_from_id(sample, 0)\n pv_list.append(statismo.PointPointValuePair(\n statismo.vtkPoint(*ref_pt), statismo.vtkPoint(*fixed_pt)))\n post_builder = statismo.PosteriorModelBuilder_vtkPD.Create()\n post_model = post_builder.BuildNewModel(\n self.pd_manager.GetData(), pv_list, 0.01, 0.01)\n\n # check for some samples if the points stay put\n coeffs1 = zeros(post_model.GetNumberOfPrincipalComponents())\n coeffs1[1] = 3\n coeffs2 = zeros(post_model.GetNumberOfPrincipalComponents())\n coeffs2[0] = -3\n\n for coeffs in [coeffs1, coeffs2]:\n constrained_sample = post_model.DrawSample(coeffs)\n self.assertAlmostEqual(\n constrained_sample.GetPoints().GetPoint(0)[0], fixed_pt[0], 1)\n self.assertAlmostEqual(\n constrained_sample.GetPoints().GetPoint(0)[1], fixed_pt[1], 1)\n self.assertAlmostEqual(\n constrained_sample.GetPoints().GetPoint(0)[2], fixed_pt[2], 
1)\n\n def check_reduced_variance_builder(self, model, reduced_var_builder):\n reduced_component_count = model.GetNumberOfPrincipalComponents() // 2\n new_model = reduced_var_builder.BuildNewModelWithLeadingComponents(\n model, reduced_component_count)\n self.assertTrue(new_model.GetNumberOfPrincipalComponents()\n == reduced_component_count)\n\n for total_var_ratio in [1.0, 0.9, 0.8, 0.7, 0.6, 0.4, 0.2, 0.1]:\n reduced_model = reduced_var_builder.BuildNewModelWithVariance(\n model, total_var_ratio)\n\n # we keep at least the required percentage of total variance\n self.assertTrue(reduced_model.GetPCAVarianceVector().sum(\n ) / model.GetPCAVarianceVector().sum() >= total_var_ratio)\n\n # make sure that one component less would not reach the variance\n self.assertTrue(reduced_model.GetPCAVarianceVector()[\n 0:-1].sum() / model.GetPCAVarianceVector().sum() < total_var_ratio)\n\n # check that there is a reduction (though we cannot say how much, as the specified variance is a lower bound)\n reduced_model05 = reduced_var_builder.BuildNewModelWithVariance(\n model, 0.5)\n self.assertTrue(reduced_model05.GetPCAVarianceVector().sum()\n <= model.GetPCAVarianceVector().sum())\n\n def test_reduced_variance_builder_pd(self):\n builder = statismo.PCAModelBuilder_vtkPD.Create()\n\n model = builder.BuildNewModel(self.pd_manager.GetData(), 0.)\n reduced_var_builder = statismo.ReducedVarianceModelBuilder_vtkPD.Create()\n\n self.check_reduced_variance_builder(model, reduced_var_builder)\n\n def test_reduced_variance_builder_sp(self):\n builder = statismo.PCAModelBuilder_vtkSP.Create()\n\n model = builder.BuildNewModel(self.sp_manager.GetData(), 0.)\n reduced_var_builder = statismo.ReducedVarianceModelBuilder_vtkSP.Create()\n\n self.check_reduced_variance_builder(model, reduced_var_builder)\n\n def check_reduced_variance_builder_noscore(self, model, reduced_var_builder):\n reduced_component_count = model.GetNumberOfPrincipalComponents() // 2\n new_model = reduced_var_builder.BuildNewModelWithLeadingComponents(\n model, reduced_component_count)\n self.assertTrue(new_model.GetNumberOfPrincipalComponents()\n == reduced_component_count)\n self.assertTrue(\n new_model.GetModelInfo().GetScoresMatrix().shape[0] == 0)\n\n def test_reduced_variance_builder_noscore_pd(self):\n # check that a model can also be reduced when no scores are present\n builder = statismo.PCAModelBuilder_vtkPD.Create()\n\n model = builder.BuildNewModel(self.pd_manager.GetData(), 0., False)\n reduced_var_builder = statismo.ReducedVarianceModelBuilder_vtkPD.Create()\n\n self.check_reduced_variance_builder_noscore(model, reduced_var_builder)\n\n def test_reduced_variance_builder_noscore_sp(self):\n # check that a model can also be reduced when no scores are present\n builder = statismo.PCAModelBuilder_vtkSP.Create()\n\n model = builder.BuildNewModel(self.sp_manager.GetData(), 0., False)\n reduced_var_builder = statismo.ReducedVarianceModelBuilder_vtkSP.Create()\n\n self.check_reduced_variance_builder_noscore(model, reduced_var_builder)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.lib.scimath.sqrt" ] ]
rodrigolece/whizz-library
[ "beb3b6a31000239843bdfae8f4edd2a700749ce7" ]
[ "whizzlibrary/quarters.py" ]
[ "\nimport numpy as np\n\n\n\ndef roundNearestQuarter(x):\n return 25*np.round(x/25)\n\ndef floorNearestQuarter(x):\n return 25*np.floor(x/25)\n\n\ndef histogramQuarters(x):\n sorted_array = np.sort(x)\n m, M = sorted_array[0], sorted_array[-1]\n bins = np.arange(m, M+26, 25) - 12.5\n\n counts = np.zeros(len(bins) - 1)\n i = 0\n\n for entry in sorted_array[:-1]: # we treat last element as a special case\n if entry < bins[i+1]:\n counts[i] += 1\n else: # we do a search\n while entry > bins[i+1]:\n i += 1\n counts[i] += 1\n\n counts[-1] += 1 # last element will always be in last box\n\n return counts, bins\n\n\ndef errorStatistics(completed_vec, original_vec, verbose=True):\n # completed_vec should already have entries that are quarters\n\n signed_errors = completed_vec - original_vec\n counts, bins = histogramQuarters(signed_errors)\n\n centers = bins[:-1] + 12.5\n zro = np.where(centers == 0.0)[0][0]\n\n if len(counts) >= 5:\n output = np.array([ counts[zro], np.sum(counts[zro-1:zro+2]), np.sum(counts[zro-2:zro+3]),\n np.sum(counts[:zro]), np.sum(counts[zro+1:]) ])\n elif len(counts) >= 3:\n output = np.array([ counts[zro], np.sum(counts[zro-1:zro+2]), len(signed_errors),\n np.sum(counts[:zro]), np.sum(counts[zro+1:]) ])\n else:\n output = np.array([ counts[zro], len(signed_errors), len(signed_errors),\n np.sum(counts[:zro]), np.sum(counts[zro+1:]) ])\n output *= 100 / len(signed_errors)\n\n rmse = np.linalg.norm(signed_errors, 2) / np.sqrt(len(signed_errors))\n output = np.append(output, rmse)\n\n if verbose:\n print('Exact: \\t\\t %.2f %%' % output[0])\n print('Within 25:\\t %.2f %%' % output[1] )\n print('Within 50:\\t %.2f %%' % output[2] )\n print('Underestimated:\\t %.2f %%' % output[3])\n print('Overestimated:\\t %.2f %%' % output[4])\n print('Min: \\t\\t %d' % centers[0])\n print('Max: \\t\\t %d' % centers[-1])\n print('RMSE: \\t\\t %.2f' % output[5])\n\n return output\n" ]
[ [ "numpy.arange", "numpy.linalg.norm", "numpy.sort", "numpy.round", "numpy.append", "numpy.floor", "numpy.where", "numpy.sum" ] ]
rdcsung/practical-pytorch
[ "6c57013c16eb928232af5e9bbe886a41c4ac9f9e" ]
[ "conditional-char-rnn/train.py" ]
[ "# Practical PyTorch: Generating Names with a Conditional Character-Level RNN\n# https://github.com/spro/practical-pytorch\n\nimport glob\nimport unicodedata\nimport string\nimport random\nimport time\nimport math\n\nimport torch\nimport torch.nn as nn\n\nfrom data import *\nfrom model import *\n\nimport config\n\n# Training the Network\n\ndef train(category_tensor, input_line_tensor, target_line_tensor):\n hidden = rnn.init_hidden()\n optimizer.zero_grad()\n loss = 0\n \n for i in range(input_line_tensor.size()[0]):\n output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)\n loss += criterion(output, target_line_tensor[i].view(1).to(config.HOST_DEVICE))\n\n loss.backward()\n optimizer.step()\n \n return output, loss.item() / input_line_tensor.size()[0]\n\ndef time_since(t):\n now = time.time()\n s = now - t\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\nn_epochs = 100000\nprint_every = 5000\nplot_every = 500\nall_losses = []\nloss_avg = 0 # Zero every plot_every epochs to keep a running average\nhidden_size = 128\nlearning_rate = 0.0005\n\nrnn = RNN(n_categories, n_letters, hidden_size, n_letters)\noptimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)\ncriterion = nn.CrossEntropyLoss()\n\nstart = time.time()\n\ndef save():\n torch.save(rnn, 'conditional-char-rnn.pt')\n\ntry:\n print(\"Training for %d epochs...\" % n_epochs)\n for epoch in range(1, n_epochs + 1):\n output, loss = train(*random_training_set())\n loss_avg += loss\n\n if epoch % print_every == 0:\n print('%s (%d %d%%) %.4f' % (time_since(start), epoch, epoch / n_epochs * 100, loss))\n\n if epoch % plot_every == 0:\n all_losses.append(loss_avg / plot_every)\n loss_avg = 0\n save()\n\nexcept KeyboardInterrupt:\n print(\"Saving before quit...\")\n save()\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.save" ] ]
ZhangSanFengByGit/toolkit
[ "9f2958bfd67d140afbc22f12c8d38995996330b0" ]
[ "got10k/trackers/__init__.py" ]
[ "from __future__ import absolute_import\n\nimport numpy as np\nimport time\nfrom PIL import Image\nimport cv2\n\nfrom ..utils.viz import show_frame\n\n\nclass Tracker(object):\n\n def __init__(self, name, is_deterministic=False):\n self.name = name\n self.is_deterministic = is_deterministic\n \n def init(self, image, box):\n raise NotImplementedError()\n\n def update(self, image):\n raise NotImplementedError()\n\n def track(self, img_files, box, visualize=False):\n frame_num = len(img_files)\n boxes = np.zeros((frame_num, 4))\n boxes[0] = box\n times = np.zeros(frame_num)\n\n for f, img_file in enumerate(img_files):\n image = cv2.imread(img_file) #BGR\n #if not image.mode == 'RGB':\n # image = image.convert('RGB')\n\n start_time = time.time()\n if f == 0:\n self.init(image, box)\n else:\n boxes[f, :] = self.update(image)\n times[f] = time.time() - start_time\n\n if visualize:\n show_frame(image, boxes[f, :])\n\n return boxes, times\n\n\nfrom .identity_tracker import IdentityTracker\n" ]
[ [ "numpy.zeros" ] ]
vishalbelsare/pcpca
[ "763e61c669a6cdadbd706e3cb0d553e5f5bd6ff7" ]
[ "experiments/realworld/scrnaseq/single_cell_bmmc.py" ]
[ "from pcpca import PCPCA, CPCA\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom os.path import join as pjoin\nfrom scipy.io import mmread\nfrom sklearn.decomposition import PCA\n\n\nDATA_DIR = \"../../../data/singlecell_bmmc\"\nN_COMPONENTS = 10\n\n\nif __name__ == \"__main__\":\n\n # Read in data\n # pretransplant1 = pd.read_csv(pjoin(DATA_DIR, \"clean\", \"pretransplant1.csv\"), index_col=0)\n # posttransplant1 = pd.read_csv(pjoin(DATA_DIR, \"clean\", \"posttransplant1.csv\"), index_col=0)\n\n pretransplant2 = pd.read_csv(\n pjoin(DATA_DIR, \"clean\", \"pretransplant2.csv\"), index_col=0\n )\n posttransplant2 = pd.read_csv(\n pjoin(DATA_DIR, \"clean\", \"posttransplant2.csv\"), index_col=0\n )\n\n healthy1 = pd.read_csv(pjoin(DATA_DIR, \"clean\", \"healthy1.csv\"), index_col=0)\n # healthy2 = pd.read_csv(pjoin(DATA_DIR, \"clean\", \"healthy2.csv\"), index_col=0)\n\n # Background is made up of healthy cells\n Y = healthy1.values # pd.concat([healthy1, healthy2], axis=0).values\n\n X = pd.concat([pretransplant2, posttransplant2], axis=0).values\n X_labels = [\"Pretransplant\" for _ in range(pretransplant2.shape[0])]\n X_labels.extend([\"Posttransplant\" for _ in range(posttransplant2.shape[0])])\n X_labels = np.array(X_labels)\n assert X_labels.shape[0] == X.shape[0]\n\n # Standardize\n Y -= Y.mean(0)\n Y /= Y.std(0)\n Y = Y.T\n X -= X.mean(0)\n X /= X.std(0)\n X = X.T\n\n n, m = X.shape[1], Y.shape[1]\n\n X_df = pd.DataFrame(X.T)\n X_df[\"condition\"] = X_labels\n\n import matplotlib\n\n font = {\"size\": 20}\n matplotlib.rc(\"font\", **font)\n matplotlib.rcParams[\"text.usetex\"] = True\n\n gamma_range = [0, 0.7, 0.9]\n\n plt.figure(figsize=(len(gamma_range) * 6, 5))\n\n for ii, gamma in enumerate(gamma_range):\n\n pcpca = PCPCA(gamma=n / m * gamma, n_components=N_COMPONENTS)\n X_reduced, Y_reduced = pcpca.fit_transform(X, Y)\n\n plt.subplot(1, len(gamma_range), ii + 1)\n if gamma == 0:\n plt.title(r\"$\\gamma^\\prime$={} (PPCA)\".format(gamma))\n else:\n plt.title(r\"$\\gamma^\\prime$={}\".format(gamma))\n\n X_reduced_df = pd.DataFrame(X_reduced.T[:, 2:4], columns=[\"PCPC1\", \"PCPC2\"])\n X_reduced_df[\"condition\"] = X_labels\n\n # Y_reduced_df = pd.DataFrame(Y_reduced.T[:, 2:4], columns=[\"PCPC1\", \"PCPC2\"])\n # Y_reduced_df['condition'] = [\n # \"Background\" for _ in range(Y_reduced_df.shape[0])]\n\n plot_df = X_reduced_df[\n X_reduced_df.condition.isin([\"Pretransplant\", \"Posttransplant\"])\n ]\n\n # plot_df = pd.concat([X_reduced_df, Y_reduced_df], axis=0)\n sns.scatterplot(\n data=plot_df,\n x=\"PCPC1\",\n y=\"PCPC2\",\n hue=\"condition\",\n alpha=0.5,\n palette=[\"green\", \"orange\"],\n )\n plt.xlabel(\"PCPC3\")\n plt.ylabel(\"PCPC4\")\n\n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles=handles[1:], labels=labels[1:])\n\n plt.tight_layout()\n plt.savefig(\"../../../plots/scrnaseq/pcpca_singlecell_bmmc.png\")\n\n plt.show()\n import ipdb\n\n ipdb.set_trace()\n" ]
[ [ "matplotlib.pyplot.gca", "pandas.concat", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.rc", "matplotlib.pyplot.ylabel" ] ]
LauritzRaisch/picosdk-python-wrappers
[ "08be77a2efd917a5d837e2caad8f771579a79de2", "08be77a2efd917a5d837e2caad8f771579a79de2" ]
[ "ps5000aExamples/ps5000aBlockCallbackExample.py", "ps4000aExamples/ps4000aStreamingExample.py" ]
[ "#\n# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.\n#\n# PS5000A BLOCK MODE EXAMPLE\n# This example opens a 5000a driver device, sets up two channels and a trigger then collects a block of data.\n# This data is then plotted as mV against time in ns.\n\nimport ctypes\nimport numpy as np\nfrom picosdk.ps5000a import ps5000a as ps\nimport matplotlib.pyplot as plt\nfrom picosdk.functions import adc2mV, assert_pico_ok, mV2adc\nfrom picosdk.constants import PICO_STATUS\nimport time\n\n# Create chandle and status ready for use\nchandle = ctypes.c_int16()\nstatus = {}\n\n# Open 5000 series PicoScope\n# Resolution set to 12 Bit\nresolution =ps.PS5000A_DEVICE_RESOLUTION[\"PS5000A_DR_12BIT\"]\n# Returns handle to chandle for use in future API functions\nstatus[\"openunit\"] = ps.ps5000aOpenUnit(ctypes.byref(chandle), None, resolution)\n\ntry:\n assert_pico_ok(status[\"openunit\"])\nexcept: # PicoNotOkError:\n\n powerStatus = status[\"openunit\"]\n\n if powerStatus == 286:\n status[\"changePowerSource\"] = ps.ps5000aChangePowerSource(chandle, powerStatus)\n elif powerStatus == 282:\n status[\"changePowerSource\"] = ps.ps5000aChangePowerSource(chandle, powerStatus)\n else:\n raise\n\n assert_pico_ok(status[\"changePowerSource\"])\n\n# Set up channel A\n# handle = chandle\nchannel = ps.PS5000A_CHANNEL[\"PS5000A_CHANNEL_A\"]\n# enabled = 1\ncoupling_type = ps.PS5000A_COUPLING[\"PS5000A_DC\"]\nchARange = ps.PS5000A_RANGE[\"PS5000A_20V\"]\n# analogue offset = 0 V\nstatus[\"setChA\"] = ps.ps5000aSetChannel(chandle, channel, 1, coupling_type, chARange, 0)\nassert_pico_ok(status[\"setChA\"])\n\n# Set up channel B\n# handle = chandle\nchannel = ps.PS5000A_CHANNEL[\"PS5000A_CHANNEL_B\"]\n# enabled = 1\n# coupling_type = ps.PS5000A_COUPLING[\"PS5000A_DC\"]\nchBRange = ps.PS5000A_RANGE[\"PS5000A_2V\"]\n# analogue offset = 0 V\nstatus[\"setChB\"] = ps.ps5000aSetChannel(chandle, channel, 1, coupling_type, chBRange, 0)\nassert_pico_ok(status[\"setChB\"])\n\n# find maximum ADC count value\n# handle = chandle\n# pointer to value = ctypes.byref(maxADC)\nmaxADC = ctypes.c_int16()\nstatus[\"maximumValue\"] = ps.ps5000aMaximumValue(chandle, ctypes.byref(maxADC))\nassert_pico_ok(status[\"maximumValue\"])\n\n# Set up single trigger\n# handle = chandle\n# enabled = 1\nsource = ps.PS5000A_CHANNEL[\"PS5000A_CHANNEL_A\"]\nthreshold = int(mV2adc(500,chARange, maxADC))\n# direction = PS5000A_RISING = 2\n# delay = 0 s\n# auto Trigger = 1000 ms\nstatus[\"trigger\"] = ps.ps5000aSetSimpleTrigger(chandle, 1, source, threshold, 2, 0, 1000)\nassert_pico_ok(status[\"trigger\"])\n\n# Set number of pre and post trigger samples to be collected\npreTriggerSamples = 2500\npostTriggerSamples = 2500\nmaxSamples = preTriggerSamples + postTriggerSamples\n\n# Get timebase information\n# handle = chandle\ntimebase = 8\n# noSamples = maxSamples\n# pointer to timeIntervalNanoseconds = ctypes.byref(timeIntervalns)\n# pointer to maxSamples = ctypes.byref(returnedMaxSamples)\n# segment index = 0\ntimeIntervalns = ctypes.c_float()\nreturnedMaxSamples = ctypes.c_int32()\nstatus[\"getTimebase2\"] = ps.ps5000aGetTimebase2(chandle, timebase, maxSamples, ctypes.byref(timeIntervalns), ctypes.byref(returnedMaxSamples), 0)\nassert_pico_ok(status[\"getTimebase2\"])\n\n# setup callback function\nready = False\n\ndef block_callback(handle, statusCallback, param):\n global wasCalledBack, ready\n wasCalledBack = True\n if statusCallback != PICO_STATUS['PICO_CANCELLED']:\n ready = True\n \n# Convert the python function into a C function 
pointer.\ncFuncPtr = ps.BlockReadyType(block_callback)\n\n# Run block capture\n# handle = chandle\n# number of pre-trigger samples = preTriggerSamples\n# number of post-trigger samples = PostTriggerSamples\n# timebase = 8 = 80 ns (see Programmer's guide for mre information on timebases)\n# time indisposed ms = None (not needed in the example)\n# segment index = 0\n# lpReady = cFuncPtr\n# pParameter = None\nstatus[\"runBlock\"] = ps.ps5000aRunBlock(chandle, preTriggerSamples, postTriggerSamples, timebase, None, 0, cFuncPtr, None)\nassert_pico_ok(status[\"runBlock\"])\n\n# Check for data collection to finish using ps5000aIsReady\ncheck = ctypes.c_int16(0)\nwhile ready == False:\n time.sleep(0.01)\n \nprint(\"Capture finished\")\n\n\n# Create buffers ready for assigning pointers for data collection\nbufferAMax = (ctypes.c_int16 * maxSamples)()\nbufferAMin = (ctypes.c_int16 * maxSamples)() # used for downsampling which isn't in the scope of this example\nbufferBMax = (ctypes.c_int16 * maxSamples)()\nbufferBMin = (ctypes.c_int16 * maxSamples)() # used for downsampling which isn't in the scope of this example\n\n# Set data buffer location for data collection from channel A\n# handle = chandle\nsource = ps.PS5000A_CHANNEL[\"PS5000A_CHANNEL_A\"]\n# pointer to buffer max = ctypes.byref(bufferAMax)\n# pointer to buffer min = ctypes.byref(bufferAMin)\n# buffer length = maxSamples\n# segment index = 0\n# ratio mode = PS5000A_RATIO_MODE_NONE = 0\nstatus[\"setDataBuffersA\"] = ps.ps5000aSetDataBuffers(chandle, source, ctypes.byref(bufferAMax), ctypes.byref(bufferAMin), maxSamples, 0, 0)\nassert_pico_ok(status[\"setDataBuffersA\"])\n\n# Set data buffer location for data collection from channel B\n# handle = chandle\nsource = ps.PS5000A_CHANNEL[\"PS5000A_CHANNEL_B\"]\n# pointer to buffer max = ctypes.byref(bufferBMax)\n# pointer to buffer min = ctypes.byref(bufferBMin)\n# buffer length = maxSamples\n# segment index = 0\n# ratio mode = PS5000A_RATIO_MODE_NONE = 0\nstatus[\"setDataBuffersB\"] = ps.ps5000aSetDataBuffers(chandle, source, ctypes.byref(bufferBMax), ctypes.byref(bufferBMin), maxSamples, 0, 0)\nassert_pico_ok(status[\"setDataBuffersB\"])\n\n# create overflow loaction\noverflow = ctypes.c_int16()\n# create converted type maxSamples\ncmaxSamples = ctypes.c_int32(maxSamples)\n\n# Retried data from scope to buffers assigned above\n# handle = chandle\n# start index = 0\n# pointer to number of samples = ctypes.byref(cmaxSamples)\n# downsample ratio = 0\n# downsample ratio mode = PS5000A_RATIO_MODE_NONE\n# pointer to overflow = ctypes.byref(overflow))\nstatus[\"getValues\"] = ps.ps5000aGetValues(chandle, 0, ctypes.byref(cmaxSamples), 0, 0, 0, ctypes.byref(overflow))\nassert_pico_ok(status[\"getValues\"])\n\n\n# convert ADC counts data to mV\nadc2mVChAMax = adc2mV(bufferAMax, chARange, maxADC)\nadc2mVChBMax = adc2mV(bufferBMax, chBRange, maxADC)\n\n# Create time data\ntime = np.linspace(0, (cmaxSamples.value - 1) * timeIntervalns.value, cmaxSamples.value)\n\n# plot data from channel A and B\nplt.plot(time, adc2mVChAMax[:])\nplt.plot(time, adc2mVChBMax[:])\nplt.xlabel('Time (ns)')\nplt.ylabel('Voltage (mV)')\nplt.show()\n\n# Stop the scope\n# handle = chandle\nstatus[\"stop\"] = ps.ps5000aStop(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Close unit Disconnect the scope\n# handle = chandle\nstatus[\"close\"]=ps.ps5000aCloseUnit(chandle)\nassert_pico_ok(status[\"close\"])\n\n# display status returns\nprint(status)", "#\n# Copyright (C) 2018-2019 Pico Technology Ltd. 
See LICENSE file for terms.\n#\n# PS2000 Series (A API) STREAMING MODE EXAMPLE\n# This example demonstrates how to call the ps4000A driver API functions in order to open a device, setup 2 channels and collects streamed data (1 buffer).\n# This data is then plotted as mV against time in ns.\n\nimport ctypes\nimport numpy as np\nfrom picosdk.ps4000a import ps4000a as ps\nimport matplotlib.pyplot as plt\nfrom picosdk.functions import adc2mV, assert_pico_ok\nimport time\n\n# Create chandle and status ready for use\nchandle = ctypes.c_int16()\nstatus = {}\n\n# Open PicoScope 2000 Series device\n# Returns handle to chandle for use in future API functions\nstatus[\"openunit\"] = ps.ps4000aOpenUnit(ctypes.byref(chandle), None)\n\ntry:\n assert_pico_ok(status[\"openunit\"])\nexcept:\n\n powerStatus = status[\"openunit\"]\n\n if powerStatus == 286:\n status[\"changePowerSource\"] = ps.ps4000aChangePowerSource(chandle, powerStatus)\n else:\n raise\n\n assert_pico_ok(status[\"changePowerSource\"])\n\n\nenabled = 1\ndisabled = 0\nanalogue_offset = 0.0\n\n# Set up channel A\n# handle = chandle\n# channel = PS4000A_CHANNEL_A = 0\n# enabled = 1\n# coupling type = PS4000A_DC = 1\n# range = PS4000A_2V = 7\n# analogue offset = 0 V\nchannel_range = 7\nstatus[\"setChA\"] = ps.ps4000aSetChannel(chandle,\n ps.PS4000A_CHANNEL['PS4000A_CHANNEL_A'],\n enabled,\n ps.PS4000A_COUPLING['PS4000A_DC'],\n channel_range,\n analogue_offset)\nassert_pico_ok(status[\"setChA\"])\n\n# Set up channel B\n# handle = chandle\n# channel = PS4000A_CHANNEL_B = 1\n# enabled = 1\n# coupling type = PS4000A_DC = 1\n# range = PS4000A_2V = 7\n# analogue offset = 0 V\nstatus[\"setChB\"] = ps.ps4000aSetChannel(chandle,\n ps.PS4000A_CHANNEL['PS4000A_CHANNEL_B'],\n enabled,\n ps.PS4000A_COUPLING['PS4000A_DC'],\n channel_range,\n analogue_offset)\nassert_pico_ok(status[\"setChB\"])\n\n# Size of capture\nsizeOfOneBuffer = 500\nnumBuffersToCapture = 10\n\ntotalSamples = sizeOfOneBuffer * numBuffersToCapture\n\n# Create buffers ready for assigning pointers for data collection\nbufferAMax = np.zeros(shape=sizeOfOneBuffer, dtype=np.int16)\nbufferBMax = np.zeros(shape=sizeOfOneBuffer, dtype=np.int16)\n\nmemory_segment = 0\n\n# Set data buffer location for data collection from channel A\n# handle = chandle\n# source = PS4000A_CHANNEL_A = 0\n# pointer to buffer max = ctypes.byref(bufferAMax)\n# pointer to buffer min = ctypes.byref(bufferAMin)\n# buffer length = maxSamples\n# segment index = 0\n# ratio mode = PS4000A_RATIO_MODE_NONE = 0\nstatus[\"setDataBuffersA\"] = ps.ps4000aSetDataBuffers(chandle,\n ps.PS4000A_CHANNEL['PS4000A_CHANNEL_A'],\n bufferAMax.ctypes.data_as(ctypes.POINTER(ctypes.c_int16)),\n None,\n sizeOfOneBuffer,\n memory_segment,\n ps.PS4000A_RATIO_MODE['PS4000A_RATIO_MODE_NONE'])\nassert_pico_ok(status[\"setDataBuffersA\"])\n\n# Set data buffer location for data collection from channel B\n# handle = chandle\n# source = PS4000A_CHANNEL_B = 1\n# pointer to buffer max = ctypes.byref(bufferBMax)\n# pointer to buffer min = ctypes.byref(bufferBMin)\n# buffer length = maxSamples\n# segment index = 0\n# ratio mode = PS4000A_RATIO_MODE_NONE = 0\nstatus[\"setDataBuffersB\"] = ps.ps4000aSetDataBuffers(chandle,\n ps.PS4000A_CHANNEL['PS4000A_CHANNEL_B'],\n bufferBMax.ctypes.data_as(ctypes.POINTER(ctypes.c_int16)),\n None,\n sizeOfOneBuffer,\n memory_segment,\n ps.PS4000A_RATIO_MODE['PS4000A_RATIO_MODE_NONE'])\nassert_pico_ok(status[\"setDataBuffersB\"])\n\n# Begin streaming mode:\nsampleInterval = ctypes.c_int32(250)\nsampleUnits = 
ps.PS4000A_TIME_UNITS['PS4000A_US']\n# We are not triggering:\nmaxPreTriggerSamples = 0\nautoStopOn = 1\n# No downsampling:\ndownsampleRatio = 1\nstatus[\"runStreaming\"] = ps.ps4000aRunStreaming(chandle,\n ctypes.byref(sampleInterval),\n sampleUnits,\n maxPreTriggerSamples,\n totalSamples,\n autoStopOn,\n downsampleRatio,\n ps.PS4000A_RATIO_MODE['PS4000A_RATIO_MODE_NONE'],\n sizeOfOneBuffer)\nassert_pico_ok(status[\"runStreaming\"])\n\nactualSampleInterval = sampleInterval.value\nactualSampleIntervalNs = actualSampleInterval * 1000\n\nprint(\"Capturing at sample interval %s ns\" % actualSampleIntervalNs)\n\n# We need a big buffer, not registered with the driver, to keep our complete capture in.\nbufferCompleteA = np.zeros(shape=totalSamples, dtype=np.int16)\nbufferCompleteB = np.zeros(shape=totalSamples, dtype=np.int16)\nnextSample = 0\nautoStopOuter = False\nwasCalledBack = False\n\n\ndef streaming_callback(handle, noOfSamples, startIndex, overflow, triggerAt, triggered, autoStop, param):\n global nextSample, autoStopOuter, wasCalledBack\n wasCalledBack = True\n destEnd = nextSample + noOfSamples\n sourceEnd = startIndex + noOfSamples\n bufferCompleteA[nextSample:destEnd] = bufferAMax[startIndex:sourceEnd]\n bufferCompleteB[nextSample:destEnd] = bufferBMax[startIndex:sourceEnd]\n nextSample += noOfSamples\n if autoStop:\n autoStopOuter = True\n\n\n# Convert the python function into a C function pointer.\ncFuncPtr = ps.StreamingReadyType(streaming_callback)\n\n# Fetch data from the driver in a loop, copying it out of the registered buffers and into our complete one.\nwhile nextSample < totalSamples and not autoStopOuter:\n wasCalledBack = False\n status[\"getStreamingLastestValues\"] = ps.ps4000aGetStreamingLatestValues(chandle, cFuncPtr, None)\n if not wasCalledBack:\n # If we weren't called back by the driver, this means no data is ready. Sleep for a short while before trying\n # again.\n time.sleep(0.01)\n\nprint(\"Done grabbing values.\")\n\n# Find maximum ADC count value\n# handle = chandle\n# pointer to value = ctypes.byref(maxADC)\nmaxADC = ctypes.c_int16()\nstatus[\"maximumValue\"] = ps.ps4000aMaximumValue(chandle, ctypes.byref(maxADC))\nassert_pico_ok(status[\"maximumValue\"])\n\n# Convert ADC counts data to mV\nadc2mVChAMax = adc2mV(bufferCompleteA, channel_range, maxADC)\nadc2mVChBMax = adc2mV(bufferCompleteB, channel_range, maxADC)\n\n# Create time data\ntime = np.linspace(0, (totalSamples - 1) * actualSampleIntervalNs, totalSamples)\n\n# Plot data from channel A and B\nplt.plot(time, adc2mVChAMax[:])\nplt.plot(time, adc2mVChBMax[:])\nplt.xlabel('Time (ns)')\nplt.ylabel('Voltage (mV)')\nplt.show()\n\n# Stop the scope\n# handle = chandle\nstatus[\"stop\"] = ps.ps4000aStop(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Disconnect the scope\n# handle = chandle\nstatus[\"close\"] = ps.ps4000aCloseUnit(chandle)\nassert_pico_ok(status[\"close\"])\n\n# Display status returns\nprint(status)\n" ]
[ [ "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
kyawlin/smlb
[ "79c757d7fc040fb30ad44410be158b3ce3bdf30d" ]
[ "learners/scikit_learn/gaussian_process_regression_sklearn.py" ]
[ "\"\"\"Gaussian Process Regression, scikit-learn implementation.\n\nScientific Machine Learning Benchmark: \nA benchmark of regression models in chem- and materials informatics.\n2019-2020, Citrine Informatics.\n\nGaussian process regression is a Bayesian kernel regression algorithm.\nIt is closely related to its Frequentist counterpart, Kernel Ridge Regression.\n\"\"\"\n\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\n\nimport sklearn as skl\nfrom sklearn.gaussian_process.kernels import Kernel\n\nfrom smlb import (\n Data,\n NormalPredictiveDistribution,\n params,\n SupervisedLearner,\n)\n\n# todo: hyperparameter optimization.\n# two modes should be supported:\n# sklearn-internal optimization, corresponding to a learner without HPs\n# smlb optimization, where HPs are not optimized internally by the sklearn GP\n# currently, only the first mode is supported\n\n# todo: handle randomness via improved Random class\n\n\nclass GaussianProcessRegressionSklearn(SupervisedLearner):\n \"\"\"Gaussian Process Regression, scikit-learn implementation.\n\n The default is a Gaussian kernel combined with a \"White kernel\" to model additive Gaussian noise.\n\n Supports only numeric (vector) inputs and labels.\n\n See\n https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html\n https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.RBF.html#sklearn.gaussian_process.kernels.RBF\n https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.WhiteKernel.html#sklearn.gaussian_process.kernels.WhiteKernel\n \"\"\"\n\n def __init__(\n self,\n internal_hp_optimization: bool = True,\n kernel: Optional[Kernel] = None,\n alpha: Union[float, Sequence] = 1e-5,\n optimizer=\"fmin_l_bfgs_b\",\n n_restarts_optimizer=0,\n normalize_y=False,\n random_state: int = None,\n **kwargs\n ):\n \"\"\"Initialize state.\n\n sklearn-specific parameters are passed through to the implementation.\n\n Parameters:\n internal_hp_optimization: if True, hyperparameters are optimized \"internally\"\n by the Gaussian process, that is, scikit-learn optimizes hyperparameters\n and for smlb the learner has no hyperparameters;\n if False, hyperparameters are optimized by smlb (and scikit-learn does\n not optimize any hyperparameters)\n kernel: scikit-learn kernel; if None, a single Gaussian kernel is used as default\n alpha: regularization constant (scalar or vector); added as-is to kernel matrix diagonal.\n Equivalent to adding a \"WhiteKernel\"; the default is the corresponding value from\n scikit-learn's WhiteKernel, and different from scikit-learn's GaussianProcessRegressor.\n optimizer: hyperparameter optimization algorithm; used only if internal_hp_optimization is True\n n_restarts_optimizer: number of times optimizer is restarted; only used if internal_hp_optimization is True\n normalize_y: whether to subtract the mean of the labels\n random_state: integer seed\n\n See skl.gaussian_process.GaussianProcessRegressor parameters.\n \"\"\"\n\n super().__init__(**kwargs)\n\n internal_hp_optimization = params.boolean(internal_hp_optimization)\n kernel = params.any_(kernel, lambda arg: params.instance(arg, Kernel), params.none)\n # incomplete check for alpha as dimension becomes known only at fitting time\n alpha = params.any_(\n alpha,\n lambda arg: params.real(arg, from_=0),\n lambda arg: params.real_vector(arg, domain=[0, np.inf]),\n )\n # todo: check optimizer, requires params.union (of string and callable) and 
params.function\n normalize_y = params.boolean(normalize_y)\n random_state = params.integer(random_state)\n\n if kernel is None:\n kernel = skl.gaussian_process.kernels.RBF() + skl.gaussian_process.kernels.WhiteKernel()\n\n assert internal_hp_optimization is True # external HP optimization not yet supported\n\n self._model = skl.gaussian_process.GaussianProcessRegressor(\n kernel=kernel,\n alpha=alpha,\n optimizer=optimizer,\n n_restarts_optimizer=n_restarts_optimizer,\n normalize_y=normalize_y,\n random_state=random_state,\n )\n\n def fit(self, data: Data) -> \"GaussianProcessRegressionSklearn\":\n \"\"\"Fits the model using training data.\n\n Parameters:\n data: labeled data to train on;\n must derive from IndexedData and LabeledData\n\n Returns:\n self (allows chaining)\n \"\"\"\n\n data = params.instance(data, Data) # todo: params.data(..., is_finite=True, is_labeled=True)\n n = data.num_samples\n\n xtrain = params.real_matrix(data.samples(), nrows=n)\n ytrain = params.real_vector(data.labels(), dimensions=n)\n\n self._model.fit(xtrain, ytrain)\n\n return self\n\n def apply(self, data: Data) -> NormalPredictiveDistribution:\n r\"\"\"Predicts new inputs.\n\n For Gaussian processes, both the noise-free predictive (posterior) \n distribution as well as the noise estimate are normally distributed.\n The predictive distribution with noise is the sum of the former two.\n\n The $\\alpha$ training noise specified at initialization time is not\n added at prediction time, and thus not part of the noise model.\n The current implementation considers contributions from any \n WhiteKernel or other kernel that has a hyperparameter 'noise_level'.\n\n Limitations:\n It is a currently accepted shortcoming that WhiteKernels that are\n not 'first-level' sum members might yield wrong noise models.\n Examples: \n WhiteKernel(...) + other kernels will work\n kernel(...) * WhiteKernel(...) will not work as intended\n\n Training data noise $\\alpha$ is not added \n\n Parameters:\n data: finite indexed data to predict;\n\n Returns:\n predictive normal distribution with the following decomposition:\n predicted: sum of model and noise distribution\n noise_part: normal distribution for estimated noise\n signal_part: normal distribution for estimated model contribution;\n the Gaussian process' \"predictive variance\";\n depends only on distance from the training data\n \"\"\"\n\n data = params.instance(data, Data) # todo: params.data(..., is_finite=True, is_labeled=True)\n\n xpred = params.real_matrix(data.samples())\n n = data.num_samples\n\n # predict\n preds, stddevs = self._model.predict(xpred, return_std=True)\n\n # noise\n # noise are all noise_level of WhiteKernel, where noise_level is variance (not standard deviation)\n # this assumes that the noise level are independent\n noise = tuple(\n v for k, v in self._model.kernel_.get_params().items() if k.endswith(\"noise_level\")\n )\n noise = np.ones(shape=n) * np.sum(noise)\n noise_part = NormalPredictiveDistribution(mean=np.zeros(shape=n), stddev=np.sqrt(noise))\n\n return NormalPredictiveDistribution(\n mean=preds,\n stddev=np.sqrt(np.square(stddevs) + noise),\n noise_part=noise_part,\n signal_part=NormalPredictiveDistribution(mean=preds, stddev=stddevs),\n )\n" ]
[ [ "numpy.square", "numpy.sqrt", "numpy.ones", "sklearn.gaussian_process.GaussianProcessRegressor", "sklearn.gaussian_process.kernels.WhiteKernel", "sklearn.gaussian_process.kernels.RBF", "numpy.zeros", "numpy.sum" ] ]
ihakiwamu/Experiment_Ryakugo
[ "79667ef2d66ae8008c8ce57c27fe4d65d153aee6" ]
[ "ziken03/sample.py" ]
[ "from sklearn import datasets\nimport numpy as np\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\nprint(X)\n" ]
[ [ "sklearn.datasets.load_iris" ] ]
Emekaborisama/word_embedding_loader
[ "5b0fd435360d335341dc111bbc52869bb2731422" ]
[ "test/word_embedding_loader/saver/test_saver.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom word_embedding_loader import word_embedding\nimport word_embedding_loader.saver as saver\n\n\[email protected]\ndef word_embedding_data():\n vocab = (\n (b'</s>', 0),\n (b'the', 1),\n ('日本語'.encode('utf-8'), 2)\n )\n vocab_dict = dict(vocab)\n arr = np.array(\n [[0.418, 0.24968, -0.41242, 0.1217],\n [0.013441, 0.23682, -0.16899, 0.40951],\n [0.15164, 0.30177, -0.16763, 0.17684]], dtype=np.float32)\n return arr, vocab, vocab_dict\n\n\[email protected](\"mod\", [\n (saver.glove, 'glove', False),\n (saver.word2vec_bin, 'word2vec', True),\n (saver.word2vec_text, 'word2vec', False)\n])\ndef test_save(word_embedding_data, mod, tmpdir):\n _saver, wtype, binary = mod\n arr_input, vocab_input, vocab_expected = word_embedding_data\n\n with open(tmpdir.join('output.txt').strpath, 'a+b') as f:\n _saver.save(f, arr_input, vocab_input)\n f.seek(0)\n obj = word_embedding.WordEmbedding.load(\n f.name, dtype=np.float32, format=wtype, binary=binary)\n vocab = obj.vocab\n arr = obj.vectors\n\n assert_array_equal(arr, arr_input)\n assert vocab_expected == vocab\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.array" ] ]
cansik/mesh-sequence-player
[ "1b37467e3043f135725049d93431503f64fd5c73" ]
[ "mesh_sequence_player/MeshSequencePlayer.py" ]
[ "import os.path\nimport time\nfrom functools import partial\nfrom typing import Optional\n\nimport numpy as np\nimport open3d as o3d\nfrom moviepy.video.io.ImageSequenceClip import ImageSequenceClip\nfrom tqdm import tqdm\n\nfrom mesh_sequence_player.FPSCounter import FPSCounter\nfrom mesh_sequence_player.FastGeometryLoader import load_meshes_fast, load_meshes_safe, load_pointclouds_safe, \\\n load_pointclouds_fast\nfrom mesh_sequence_player.geometries.BaseGeometry import BaseGeometry\nfrom mesh_sequence_player.geometries.Geometry import Geometry\nfrom mesh_sequence_player.geometries.LazyGeometry import LazyGeometry\nfrom mesh_sequence_player.utils import get_files_in_path\n\n\nclass MeshSequencePlayer:\n def __init__(self, fps: int = 24, loop: bool = True):\n self.fps = fps\n self.loop = loop\n self.geometries: [BaseGeometry] = []\n self.rotation_x = 0.0\n self.rotation_y = 0.0\n self.background_color = [255, 255, 255]\n\n self.debug = False\n self.load_safe = False\n self.lazy_loading = False\n self.post_process_mesh = False\n\n self.render = False\n self.output_path = \"render.mp4\"\n self.render_index = 0\n\n self.vis = o3d.visualization.Visualizer()\n\n self._is_playing: bool = False\n self._index: int = 0\n self._last_update_ts = 0\n self._current_geometry = None\n\n self.bitrate = \"1.5M\"\n self._frames = []\n self._progress_bar: Optional[tqdm] = None\n self._render_fps = fps\n\n self._fps_counter = FPSCounter()\n\n def load_meshes(self, mesh_folder: str, mesh_format: str = \"*.obj\"):\n files = sorted(get_files_in_path(mesh_folder, extensions=[mesh_format]))\n\n if self.lazy_loading:\n method = partial(o3d.io.read_triangle_mesh, enable_post_processing=self.post_process_mesh)\n self.geometries = [LazyGeometry(os.path.abspath(file), method) for file in files]\n return\n\n if self.load_safe:\n meshes = load_meshes_safe(files, post_processing=self.post_process_mesh)\n else:\n meshes = load_meshes_fast(files, post_processing=self.post_process_mesh)\n\n self.geometries = [Geometry(mesh) for mesh in meshes]\n\n def load_pointclouds(self, pcl_folder: str, pcl_format: str = \"*.ply\"):\n files = sorted(get_files_in_path(pcl_folder, extensions=[pcl_format]))\n\n if self.lazy_loading:\n self.geometries = [LazyGeometry(os.path.abspath(file), o3d.io.read_point_cloud) for file in files]\n return\n\n if self.load_safe:\n pcds = load_pointclouds_safe(files)\n else:\n pcds = load_pointclouds_fast(files)\n\n self.geometries = [Geometry(pcd) for pcd in pcds]\n\n def open(self, window_name: str = 'Mesh Sequence Player',\n width: int = 1080, height: int = 1080,\n visible: bool = True):\n self.vis.create_window(window_name=window_name,\n width=width,\n height=height,\n visible=visible)\n\n if len(self.geometries) == 0:\n print(\"No meshes to show!\")\n return\n\n if self.render:\n self._frames = []\n self._progress_bar = tqdm(total=len(self.geometries), desc=\"rendering\")\n\n # make rendering as fast as possible\n self.fps = 10000.0\n\n # set background color\n opt = self.vis.get_render_option()\n opt.background_color = np.asarray(self.background_color)\n\n # add first mesh\n self._current_geometry = self.geometries[self._index].get()\n self.vis.add_geometry(self._current_geometry, reset_bounding_box=True)\n\n def close(self):\n self._is_playing = False\n self.vis.destroy_window()\n\n def play(self):\n self._is_playing = True\n self._play_loop()\n\n def pause(self):\n self._is_playing = False\n\n def jump(self, index: int):\n self._index = index\n\n def _play_loop(self):\n 
self._fps_counter.reset()\n\n while self._is_playing:\n # rotation\n ctr = self.vis.get_view_control()\n ctr.rotate(self.rotation_x, self.rotation_y)\n\n # events\n if not self.vis.poll_events():\n break\n\n self.vis.update_renderer()\n\n # skip if no meshes available\n if len(self.geometries) == 0:\n continue\n\n # render\n if self.render:\n color = self.vis.capture_screen_float_buffer(False)\n color = np.asarray(color)\n color = np.uint8(color * 255.0)\n # im_rgb = cvtColor(color, COLOR_BGR2RGB)\n self._frames.append(color)\n\n self.render_index += 1\n self._progress_bar.update()\n\n # frame playing\n current = self._millis()\n if (current - self._last_update_ts) > (1000.0 / self.fps):\n self._next_frame()\n self._last_update_ts = current\n\n # keep track of fps\n self._fps_counter.update()\n\n if self.debug:\n tqdm.write(\"FPS: %0.2f\" % self._fps_counter.fps)\n\n def _next_frame(self):\n if not self.loop and self._index == len(self.geometries) - 1:\n if self.render:\n tqdm.write(\"\\nsaving rendering...\")\n clip = ImageSequenceClip(self._frames, fps=self._render_fps)\n clip.write_videofile(self.output_path, bitrate=self.bitrate, logger=None)\n clip.close()\n self._progress_bar.close()\n\n self._is_playing = False\n\n self.vis.remove_geometry(self._current_geometry, reset_bounding_box=False)\n self._index = (self._index + 1) % len(self.geometries)\n self._current_geometry = self.geometries[self._index].get()\n self.vis.add_geometry(self._current_geometry, reset_bounding_box=False)\n\n @staticmethod\n def _millis() -> int:\n return round(time.time() * 1000)\n" ]
[ [ "numpy.asarray", "numpy.uint8" ] ]
psychochatbot/Psychochatbot
[ "9a5b7cdd11f1347d5fecb90e0ee5536500fb8dd2" ]
[ "app.py" ]
[ "#!/usr/bin/env python\n\nimport urllib\nimport json\nimport os\nimport pickle\nimport io\nimport pandas as pd\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import session\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# Flask app should start in global layout\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'oh_so_secret'\nwith open('psycho.pkl', 'rb') as f:\n model = pickle.load(f,encoding='latin1')\n \nwith io.open('data.json', 'w', encoding='utf8') as outfile:\n entry = {}\n entry['name'] = 'happy'\n str_=json.dumps(entry,ensure_ascii=False)\n outfile.write((str_))\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n\n\n#with open('record.txt','w') as ef:\n # ef.write('ephimerel testing')\n#print(model.predict([[2,0,0,1]]))\n\n#@app.before_first_request\n#def initiali():\n # global father_occupation\n # global mother_occupation\n \n\n\n\[email protected]('/webhook', methods=['POST'])\ndef webhook():\n req = request.get_json(silent=True, force=True)\n\n print(\"Request:\")\n print(json.dumps(req, indent=4))\n\n res = makeWebhookResult(req)\n\n res = json.dumps(res, indent=4)\n print(res) \n r = make_response(res)\n r.headers['Content-Type'] = 'application/json'\n return r\n\ndef dump_value(key,value):\n with open('data.json', 'r') as data_file:\n data_loaded =json.loads(data_file.read())\n with io.open('data.json', 'w', encoding='utf8') as outfile:\n #data_loaded =json.loads(outfile)\n # abc=eval(data_loaded)\n data_loaded[key]=value\n str_=json.dumps(data_loaded,ensure_ascii=False)\n outfile.write((str_))\n\ndef makeWebhookResult(req):\n #father_occupation=\"\"\n # mother_occupation=\"\"\n if req.get(\"result\").get(\"action\") == \"how_are_you\":\n # return {}\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n zone = parameters.get(\"how_r_u\")\n speech=\"you are \"+str(zone)\n #speech=\"you are bhoot\"\n print(zone)\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n # \"contextOut\": [],\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_occupation_father\"\n }\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_welcome\":\n result = req.get(\"result\")\n session.clear()\n with io.open('data.json', 'w', encoding='utf8') as outfile:\n entry = {}\n entry['name'] = 'happy'\n str_=json.dumps(entry,ensure_ascii=False)\n outfile.write((str_))\n return {}\n # return {\n # \"speech\": \"apka swagat h\",\n #\"displayText\": \"apka swagat h\"\n #}\n \n \n if req.get(\"result\").get(\"action\") == \"action_welcome_good_day\":\n result = req.get(\"result\")\n # parameters = result.get(\"parameters\")\n #for key in session.keys():\n # session.pop(key)\n session.clear()\n speech=\"Okay, let's talk about your family\"\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_hobbies_interests\"\n }\n }\n \n if req.get(\"result\").get(\"action\") == \"action_welcome_bad_day\":\n result = req.get(\"result\")\n # parameters = result.get(\"parameters\")\n \n speech=\"Okay, let's talk about your family\"\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_joke\"\n }\n }\n \n if req.get(\"result\").get(\"action\") == \"action_joke_no\":\n 
result = req.get(\"result\")\n return {\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_joke\"\n }\n }\n \n if req.get(\"result\").get(\"action\") == \"action_joke_yes\":\n result = req.get(\"result\")\n return {\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_hobbies_interests\"\n }\n }\n \n #THIS IS NEW VERSION\n if req.get(\"result\").get(\"action\") == \"action_father_occupation\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n father_occupation=parameters.get(\"f_o\")\n dump_value('father_occupation',father_occupation)\n psycho_algo = pd.read_csv(\"psycho_data.csv\")\n psycho_algo.head()\n number = LabelEncoder()\n psycho_algo['like_mother'] = number.fit_transform(psycho_algo['like_mother'])\n #print(psycho_algo['like_mother'])\n # print(\"this is \")\n #print(number.transform(['y']))\n \n #with io.open('data.json', 'a', encoding='utf8') as outfile:\n # entry = {}\n # entry['father_occupation']=father_occupation\n # str_=json.dumps(entry,ensure_ascii=False)\n # outfile.write((str_))\n with open('data.json', 'r') as data_file:\n data_loaded =json.loads(data_file.read())\n print(data_loaded['father_occupation'])\n # data_file.close()\n if('mother_occupation' not in data_loaded):\n speech=\"your father is \"+data_loaded['father_occupation']+\" what does your mother do?\"\n return {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"apiai-psychochatbot\"\n }\n #speech=\"Okay, let's talk about your family\"\n else: \n speech=\"your father is \"+data_loaded['father_occupation']+\"and maa is \"+data_loaded['mother_occupation']\n return {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_ask_be_like\"\n }\n }\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\"\n }\n \n #THIS IS NEW VERSION\n if req.get(\"result\").get(\"action\") == \"action_mother_occupation\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n mother_occupation=parameters.get(\"m_o\")\n dump_value('mother_occupation',mother_occupation)\n #if(session['father_occupation']==\"\"):\n with open('data.json', 'r') as data_file:\n data_loaded =json.loads(data_file.read())\n# print(data_loaded['father_occupation'])\n if('father_occupation' not in data_loaded):\n speech=\"your mother is \"+data_loaded['mother_occupation']+\" what does your father do?\"\n return {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"apiai-psychochatbot\"\n }\n #speech=\"Okay, let's talk about your family\"\n else: \n speech=\"your mother is \"+data_loaded['mother_occupation']+\" and your father is \"+data_loaded['father_occupation']\n return {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_ask_be_like\"\n }\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_ask_be_like_father\":\n result = req.get(\"result\")\n dump_value('ask_be_like_father','yes')\n dump_value('ask_be_like_mother','no')\n #data_loaded['ask_be_like']=\"father\"\n return {\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_result\"\n }\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_ask_be_like_mother\":\n result = req.get(\"result\")\n dump_value('ask_be_like_father','no')\n dump_value('ask_be_like_mother','yes')\n 
#data_loaded['ask_be_like']=\"mother\"\n return {\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_result\"\n }\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_art\":\n result = req.get(\"result\")\n data_loaded['hobby']=\"arts\"\n parameters = result.get(\"parameters\")\n duration=parameters.get(\"duration\")\n certifications=parameters.get(\"certifications\")\n as_career=parameters.get(\"as_career\")\n parents_support=parameters.get(\"parents_support\")\n dump_value('hobby','arts')\n dump_value('duration','n')\n dump_value('certifications',certifications)\n dump_value('as_career',as_career)\n dump_value('parents_support',parents_support)\n \n # data_loaded['duration']=duration\n # data_loaded['certis']= certifications\n # data_loaded['as_career']=as_career\n #data_loaded['parents_support']=parents_support\n \n return {\n # \"speech\": \"arts\",\n #\"displayText\": \"arts\",\n \"source\": \"apiai-psychochatbot\"\n # \"followupEvent\": {\n # \"name\": \"event_hobbies_interests\"\n #}\n }\n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_sports\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n duration=parameters.get(\"duration\") \n print(duration)\n achievements=parameters.get(\"achievements\")\n as_career=parameters.get(\"as_career\")\n parents_support=parameters.get(\"parents_support\")\n dump_value('hobby','sports')\n dump_value('duration','h')\n dump_value('certifications',achievements)\n dump_value('as_career',as_career)\n dump_value('parents_support',parents_support)\n return {\n \"speech\": \"Do you have any other hobbies or interests?\",\n \"displayText\": \"Do you have any other hobbies or interests?\",\n \"source\": \"apiai-psychochatbot\"\n # \"followupEvent\": {\n # \"name\": \"event_hobbies_interests\"\n #}\n }\n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_misc\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n duration=parameters.get(\"duration\")\n certification=parameters.get(\"certification\")\n as_career=parameters.get(\"as_career\")\n parents_support=parameters.get(\"parents_support\")\n return {\n \"source\": \"apiai-psychochatbot\"\n # \"followupEvent\": {\n # \"name\": \"event_hobbies_interests\"\n #}\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_arts_no\":\n return{\n \"followupEvent\": {\n \"name\": \"event_ask_about_family\"\n }\n }\n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_sports_no\":\n return{\n \"followupEvent\": {\n \"name\": \"event_ask_about_family\"\n }\n }\n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_misc_no\":\n return{\n \"followupEvent\": {\n \"name\": \"event_ask_about_family\"\n }\n } \n \n if req.get(\"result\").get(\"action\") == \"action_hobbies_interests_extras_no\":\n return{\n \"followupEvent\": {\n \"name\": \"event_ask_about_family\"\n }\n } \n \n if req.get(\"result\").get(\"action\") == \"action_result\":\n psycho_algo = pd.read_csv(\"psycho_data_final.csv\")\n psycho_algo.head()\n number1 = LabelEncoder()\n psycho_algo['like_mother'] = number1.fit_transform(psycho_algo['like_mother'])\n number2 = LabelEncoder()\n psycho_algo['like_father'] = number2.fit_transform(psycho_algo['like_father'])\n number3 = LabelEncoder()\n psycho_algo['hobby_as_career'] = number3.fit_transform(psycho_algo['hobby_as_career'])\n number4 = LabelEncoder()\n psycho_algo['achieve_hobby'] = 
number4.fit_transform(psycho_algo['achieve_hobby'])\n number5 = LabelEncoder()\n psycho_algo['duration_passion'] = number5.fit_transform(psycho_algo['duration_passion'])\n number6 = LabelEncoder()\n psycho_algo['parent_support'] = number6.fit_transform(psycho_algo['parent_support'])\n number7 = LabelEncoder()\n psycho_algo['result'] = number7.fit_transform(psycho_algo['result'])\n with open('data.json', 'r') as data_file:\n data_loaded =json.loads(data_file.read())\n like_mother=str(data_loaded['ask_be_like_father'])\n like_father=str(data_loaded['ask_be_like_mother'])\n hobby_as_career=str(data_loaded['as_career'])\n achieve_hobby=str(data_loaded['certifications'])\n duration_passion='h'\n parent_support=str(data_loaded['parents_support'])\n \n list_lm=[]\n list_lm.append(like_mother)\n list_lf=[]\n list_lf.append(like_father)\n list_hc=[]\n list_hc.append(hobby_as_career)\n list_ah=[]\n list_ah.append(achieve_hobby)\n list_dp=[]\n list_dp.append(duration_passion)\n list_ps=[]\n list_ps.append(parent_support)\n# like_mother=list(like_mother)\n # like_father=list(like_father)\n # hobby_as_career=list(hobby_as_career)\n # achieve_hobby=list(achieve_hobby)\n # duration_passion=list(duration_passion)\n # parent_support=list(parent_support)\n \n lm=number1.transform(list_lm)\n lf=number2.transform(list_lf)\n hc=number3.transform(list_hc)\n ah=number4.transform(list_ah)\n dp=number5.transform(list_dp)\n ps=number6.transform(list_ps)\n \n print(ps)\n res=number7.inverse_transform(model.predict([[lm[0],lf[0],hc[0],ah[0],dp[0],ps[0]]]))\n r=res[0]\n print(r)\n with open('data.json', 'r') as data_file:\n data_loaded =json.loads(data_file.read())\n if(str(r)== 'fo'): \n ar=data_loaded['father_occupation']\n else:\n if(str(r)=='mo'):\n ar=data_loaded['mother_occupation'] \n else:\n ar=data_loaded['hobby']\n speech='On the basis of information provided by you, I think the most suitable career option for you would be to opt for ' + ar\n if(lm[0] == 1 and str(r)=='mo'): \n speech = speech + ' since you wanted to be like your mother'\n if(lf[0] == 1 and str(r)=='fo'): \n speech = speech + ' since you wanted to be like your father'\n if(hc[0] == 1 and str(r)=='hc'): \n speech = speech + ' Also since you wanted to make a carrer in your hobby and possess achievements in it '\n if(ps[0] == 1): \n speech = speech + ' and your parents also support you in this. 
'\n speech =speech + 'All the best for your future.'\n return{\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\"\n }\n\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\"\n # \"followupEvent\": {\n # \"name\": \"event_ask_about_family\"\n #}\n }\n \n \n # if req.get(\"result\").get(\"action\") == \"action_ask_about_family_yes\":\n # result = req.get(\"result\")\n # parameters = result.get(\"parameters\")\n \n # speech=\"Okay, let's talk about your family\"\n # print(speech)\n # return {\n # \"speech\": speech,\n # \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n # \"source\": \"apiai-psychochatbot\",\n # \"followupEvent\": {\n # \"name\": \"event_occupation_father\"\n #}\n #}\n \n \n \n if req.get(\"result\").get(\"action\") == \"action_occupation_father\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n occupation_father=parameters.get(\"f_o\")\n speech=\"your father is \"+str(occupation_father)\n # print(occup)\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_occupation_mother\"\n }\n }\n \n \n if req.get(\"result\").get(\"action\") == \"action_occupation_mother\":\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n occupation_mother=parameters.get(\"m_o\")\n speech=\"your mother is \"+str(occupation_mother)\n # print(occup)\n print(speech)\n return {\n \"speech\": speech,\n \"displayText\": speech,\n # \"data\": {},\n #\"contextOut\": [],\n \"source\": \"apiai-psychochatbot\",\n \"followupEvent\": {\n \"name\": \"event_want_to_be_father\"\n }\n }\n \n \n \n \n \n # if req.get(\"result\").get(\"action\") == \"occupation_\":\n # result = req.get(\"result\")\n # parameters = result.get(\"parameters\")\n # occup=parameters.get(\"abc\")\n #speech=\"your father is \"+str(occup)\n #print(occup)\n #print(speech)\n\n return {\n \"speech\": speech,\n \"displayText\": speech,\n #\"data\": {},\n # \"contextOut\": [],\n \"source\": \"apiai-psychochatbot\"\n #\"followupEvent\": {\n # \"name\": \"I1\"\n # }\n }\n\n\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n # app.secret_key=\"Sgsits2018\"\n print(\"Starting app on port %d\" % port)\n app.run(debug=True, port=port, host='0.0.0.0')\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "pandas.read_csv" ] ]
medusa-trade/alpaca-trade-api-python
[ "4ed48200a7bec15705b2c56c3f2ae94f4636cd29" ]
[ "tests/test_polygon/test_rest.py" ]
[ "import datetime\nimport pandas as pd\nfrom alpaca_trade_api import polygon\nfrom alpaca_trade_api.polygon import REST\nimport pytest\nimport requests_mock\nfrom alpaca_trade_api.polygon.rest import FinancialsReportType, FinancialsSort\n\n\[email protected]\ndef reqmock():\n with requests_mock.Mocker() as m:\n yield m\n\n\ndef endpoint(path, params='', api_version='v1'):\n return 'https://api.polygon.io/{}{}?{}&apiKey=key-id'.format(\n api_version, path, params\n )\n\n\ndef test_polygon(reqmock):\n cli = REST('key-id')\n\n # Exchanges\n reqmock.get(endpoint('/meta/exchanges'), text='''\n [{\"id\":0,\"type\":\"TRF\",\"market\":\"equities\",\"mic\":\"TFF\",\"name\":\"Multiple\",\"tape\":\"-\"}]\n''')\n\n exchanges = cli.exchanges()\n assert exchanges[0].id == 0\n assert 'Exchange(' in str(exchanges[0])\n with pytest.raises(AttributeError):\n exchanges[0].foo\n\n # Symbol Type Map\n reqmock.get(endpoint('/meta/symbol-types'), text='''\n{\n \"cs\": \"Common Stock\",\n \"adr\": \"American Depository Receipt\",\n \"cef\": \"Closed-End Fund\",\n \"etp\": \"Exchange Traded Product\",\n \"reit\": \"Real Estate Investment Trust\",\n \"mlp\": \"Master Limited Partnership\",\n \"wrt\": \"Equity WRT\",\n \"pub\": \"Public\",\n \"nyrs\": \"New York Registry Shares\",\n \"unit\": \"Unit\",\n \"right\": \"Right\",\n \"trak\": \"Tracking stock or targeted stock\",\n \"ltdp\": \"Limited Partnership\",\n \"rylt\": \"Royalty Trust\",\n \"mf\": \"Mutual Fund\",\n \"pfd\": \"Preferred Stoc\"\n}\n''')\n\n tmap = cli.symbol_type_map()\n assert tmap.cs == 'Common Stock'\n\n # Historic Aggregates V2\n aggs_response = '''\n{\n \"ticker\": \"AAPL\",\n \"status\": \"OK\",\n \"adjusted\": true,\n \"queryCount\": 55,\n \"resultsCount\": 2,\n \"results\": [\n {\n \"o\": 173.15,\n \"c\": 173.2,\n \"l\": 173.15,\n \"h\": 173.21,\n \"v\": 1800,\n \"t\": 1517529605000\n }\n ]\n}'''\n\n reqmock.get(\n endpoint(\n '/aggs/ticker/AAPL/range/1/day/2018-02-02/2018-02-06',\n params='unadjusted=False', api_version='v2'\n ),\n text=aggs_response)\n\n reqmock.get(\n endpoint(\n '/aggs/ticker/AAPL/range/1/day/1546300800000/2018-02-06',\n params='unadjusted=False', api_version='v2'\n ),\n text=aggs_response)\n\n aggs = cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from='2018-2-2',\n to='2018-2-5'\n )\n assert aggs[0].open == 173.15\n assert len(aggs) == 1\n assert aggs.df.iloc[0].high == 173.21\n with pytest.raises(AttributeError):\n aggs[0].foo\n\n # test different supported date formats, just make sure they are parsed\n # correctly by the sdk. 
don't care about the response\n cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from=datetime.datetime(2018, 2, 2),\n to='2018-2-5'\n )\n\n # test different supported date formats\n cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from=datetime.date(2018, 2, 2),\n to='2018-2-5'\n )\n\n # test different supported date formats\n cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from=pd.Timestamp('2018-2-2'),\n to='2018-2-5'\n )\n\n cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from=pd.Timestamp('2019-01-01').timestamp()*1000,\n to='2018-2-5'\n )\n\n with pytest.raises(Exception):\n cli.historic_agg_v2(\n 'AAPL', 1, 'day',\n _from=\"bad format\",\n to='2018-2-5'\n )\n\n # Last Trade\n reqmock.get(\n endpoint('/last/stocks/AAPL'),\n text='''\n{\n \"status\": \"success\",\n \"symbol\": \"AAPL\",\n \"last\": {\n \"price\": 159.59,\n \"size\": 20,\n \"exchange\": 11,\n \"cond1\": 14,\n \"cond2\": 16,\n \"cond3\": 0,\n \"cond4\": 0,\n \"timestamp\": 1518086464720\n }\n}''')\n\n trade = cli.last_trade('AAPL')\n assert trade.price == 159.59\n assert trade.timestamp.day == 8\n\n # Last Quote\n reqmock.get(\n endpoint('/last_quote/stocks/AAPL'),\n text='''\n{\n \"status\": \"success\",\n \"symbol\": \"AAPL\",\n \"last\": {\n \"askprice\": 159.59,\n \"asksize\": 2,\n \"askexchange\": 11,\n \"bidprice\": 159.45,\n \"bidsize\": 20,\n \"bidexchange\": 12,\n \"timestamp\": 1518086601843\n }\n}''')\n\n quote = cli.last_quote('AAPL')\n assert quote.askprice == 159.59\n assert quote.timestamp.day == 8\n\n # Condition Map\n reqmock.get(\n endpoint('/meta/conditions/trades'),\n text='''\n{\n \"1\": \"Regular\",\n \"2\": \"Acquisition\",\n \"3\": \"AveragePrice\",\n \"4\": \"AutomaticExecution\"\n}''')\n\n cmap = cli.condition_map()\n assert cmap._raw['1'] == 'Regular'\n\n # Company\n reqmock.get(\n endpoint('/meta/symbols/company', 'symbols=AAPL'),\n text='''[{\"symbol\": \"AAPL\"}]''',\n )\n\n ret = cli.company('AAPL')\n assert ret.symbol == 'AAPL'\n ret = cli.company(['AAPL'])\n assert ret['AAPL'].symbol == 'AAPL'\n\n # Dividends\n reqmock.get(\n endpoint('/meta/symbols/dividends', 'symbols=AAPL'),\n text='''{\"AAPL\": [{\"qualified\": \"Q\"}]}''',\n )\n ret = cli.dividends('AAPL')\n assert ret[0].qualified == 'Q'\n ret = cli.dividends(['AAPL'])\n assert ret['AAPL'][0].qualified == 'Q'\n\n # Splits\n reqmock.get(\n endpoint('/reference/splits/AAPL', api_version='v2'),\n text='''{\"results\": [{\"forfactor\": 1}]}''',\n )\n ret = cli.splits('AAPL')\n assert ret[0].forfactor == 1\n\n # Earnings\n reqmock.get(\n endpoint('/meta/symbols/earnings', 'symbols=AAPL'),\n text='''{\"AAPL\": [{\"actualEPS\": 1}]}''',\n )\n ret = cli.earnings('AAPL')\n assert ret[0].actualEPS == 1\n ret = cli.earnings(['AAPL'])\n assert ret['AAPL'][0].actualEPS == 1\n\n # Financials\n reqmock.get(\n endpoint('/meta/symbols/financials', 'symbols=AAPL'),\n text='''{\"AAPL\": [{\"reportDateStr\": \"2018-09-01\"}]}''',\n )\n ret = cli.financials('AAPL')\n assert ret[0].reportDateStr == '2018-09-01'\n ret = cli.financials(['AAPL'])\n assert ret['AAPL'][0].reportDateStr == '2018-09-01'\n\n # Financials v2\n reqmock.get(\n endpoint('/reference/financials/AAPL', api_version='v2'),\n text='''\n {\n \"status\": \"OK\",\n \"results\": [\n {\n \"earningsPerBasicShare\": 11.97,\n \"payoutRatio\": 0.251,\n \"updated\": \"2020-05-01\",\n \"workingCapital\": 57101000000,\n \"earningsBeforeInterestTaxesDepreciationAmortizationUSD\":\n 78284000000,\n \"priceEarnings\": 20.003,\n \"dividendYield\": 0.012,\n \"period\": \"Y\",\n 
\"earningsBeforeInterestTaxesDepreciationAmortization\": 78284000000,\n \"earningBeforeInterestTaxesUSD\": 65737000000,\n \"preferredDividendsIncomeStatementImpact\": 0,\n \"dividendsPerBasicCommonShare\": 3,\n \"earningsBeforeTax\": 65737000000,\n \"dateKey\": \"2019-10-31\",\n \"earningsPerDilutedShare\": 11.89,\n \"earningsPerBasicShareUSD\": 11.97,\n \"ticker\": \"AAPL\",\n \"earningBeforeInterestTaxes\": 65737000000\n },\n {\n \"earningsPerBasicShare\": 12.01,\n \"enterpriseValueOverEBIT\": 14,\n \"workingCapital\": 14473000000,\n \"priceToBookValue\": 8.928,\n \"weightedAverageSharesDiluted\": 5000109000,\n \"period\": \"Y\",\n \"priceSales\": 3.602,\n \"earningsBeforeInterestTaxesDepreciationAmortization\": 83806000000,\n \"tradeAndNonTradeReceivables\": 48995000000,\n \"totalLiabilities\": 258578000000,\n \"earningsPerDilutedShare\": 11.91,\n \"calendarDate\": \"2018-12-31\",\n \"earningsPerBasicShareUSD\": 12.01,\n \"earningsBeforeTax\": 72903000000,\n \"priceEarnings\": 16.069,\n \"netIncomeCommonStockUSD\": 59531000000,\n \"netIncomeCommonStock\": 59531000000,\n \"enterpriseValueOverEBITDA\": 12.472,\n \"earningBeforeInterestTaxesUSD\": 72903000000,\n \"effectOfExchangeRateChangesOnCash\": 0,\n \"updated\": \"2020-05-01\",\n \"earningsBeforeInterestTaxesDepreciationAmortizationUSD\":\n 83806000000,\n \"netIncome\": 59531000000,\n \"enterpriseValue\": 1045194782820,\n \"tradeAndNonTradePayables\": 55888000000,\n \"dateKey\": \"2018-11-05\",\n \"ticker\": \"AAPL\",\n \"weightedAverageShares\": 4955377000,\n \"preferredDividendsIncomeStatementImpact\": 0,\n \"earningBeforeInterestTaxes\": 72903000000\n }\n ]\n}\n ''',\n )\n\n ret = cli.financials_v2('AAPL',\n 2,\n FinancialsReportType.Y,\n FinancialsSort.CalendarDateDesc)\n\n assert len(ret) == 2\n assert type(ret) == polygon.entity.Financials\n assert type(ret[0]) == polygon.entity.Financial\n assert ret[0].ticker == \"AAPL\"\n\n # News\n reqmock.get(\n endpoint('/meta/symbols/AAPL/news'),\n text='''[{\"title\": \"Apple News\"}]''',\n )\n ret = cli.news('AAPL')\n assert ret[0].title == 'Apple News'\n\n with pytest.raises(ValueError):\n cli.company(['AAPL'] * 51)\n\n # paginated symbol list\n reqmock.get(\n endpoint('/reference/tickers', api_version='v2'),\n text='''\n{\"page\": 1, \"perPage\": 30, \"count\": 32657, \"status\": \"OK\", \"tickers\": [\n {\n \"ticker\": \"AAPL\",\n \"name\": \"Apple Inc.\",\n \"market\": \"STOCKS\",\n \"locale\": \"US\",\n \"currency\": \"USD\",\n \"active\": true,\n \"primaryExch\": \"NGS\",\n \"type\": \"cs\",\n \"codes\": {\n \"cik\": \"0000320193\",\n \"figiuid\": \"EQ0010169500001000\",\n \"scfigi\": \"BBG001S5N8V8\",\n \"cfigi\": \"BBG000B9XRY4\",\n \"figi\": \"BBG000B9Y5X2\"\n },\n \"updated\": \"2019-01-15T05:21:28.437Z\",\n \"url\": \"https://api.polygon.io/v2/reference/tickers/AAPL\"\n },\n {\n \"ticker\": \"GOOG\",\n \"name\": \"Google Inc.\",\n \"market\": \"STOCKS\",\n \"locale\": \"US\",\n \"currency\": \"USD\",\n \"active\": true,\n \"primaryExch\": \"NGS\",\n \"type\": \"cs\",\n \"codes\": {\n \"cik\": \"0000320193\",\n \"figiuid\": \"EQ0010169500001000\",\n \"scfigi\": \"BBG001S5N8V8\",\n \"cfigi\": \"BBG000B9XRY4\",\n \"figi\": \"BBG000B9Y5X2\"\n },\n \"updated\": \"2019-01-15T05:21:28.437Z\",\n \"url\": \"https://api.polygon.io/v2/reference/tickers/GOOG\"\n }\n]}''')\n cli.symbol_list_paginated(1, 2)\n # nothing to assert in the mock data. jsut checking params are parsed\n # correctly\n" ]
[ [ "pandas.Timestamp" ] ]
Data-is-life/apt-get-home
[ "77a212c19a90f201c70759fd9e99493657247ae7" ]
[ "src/initial_scrapper_function.py" ]
[ "# Author: Mohit Gangwani\n# Github: Data-is-Life\n# Date: 09/30/2018\n\nimport time\nimport random\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom random import randint\n\n\ndef session_creator(ua, url, proxy):\n '''This function is used to create a session to get data from a website.\n Proxies are used so the user's IP address is masked.'''\n\n header = random.sample(ua, 1)[0]\n session = requests.Session()\n session.proxies = {\"http\": proxy, \"https\": proxy}\n req = session.get(url, headers=header)\n\n soup = BeautifulSoup(req.text, 'lxml')\n\n return soup\n\n\ndef proxie_check(proxies):\n '''This function connects to a website that checks and if the proxies in\n the list are working.'''\n\n sprfst = []\n fst = []\n keep = []\n meh = []\n slw = []\n snl = []\n usls = []\n url = 'https://httpbin.org/ip'\n for i in range(1, (len(proxies))+1):\n proxy = proxies[i-1]\n start_time = time.time()\n try:\n response = requests.get(\n url, proxies={\"http\": proxy, \"https\": proxy})\n\n total_time = time.time()-start_time\n\n if total_time <= 1.00:\n sprfst.append(i)\n print(\n f'#{i} SUPERFAST: {total_time}')\n elif total_time <= 3.00:\n fst.append(i)\n print(f'#{i} Fast: {total_time}')\n elif total_time <= 10.00:\n keep.append(i)\n print(f'#{i} Keep: {total_time}')\n elif total_time <= 15.00:\n meh.append(i)\n print(f'#{i} Decide: {total_time}')\n elif total_time <= 20.00:\n slw.append(i)\n print(f'#{i} Slow: {total_time}')\n else:\n snl.append(i)\n print(f'#{i} Snail: {total_time}')\n except:\n total_time = time.time()-start_time\n usls.append(i)\n print(f'#{i} Delete: {total_time}')\n\n all_proxs = {}\n all_proxs['superfast'] = sprfst\n all_proxs['fast'] = fst\n all_proxs['keep'] = keep\n all_proxs['decide'] = meh\n all_proxs['slow'] = slw\n all_proxs['snail'] = snl\n all_proxs['delete'] = usls\n\n return all_proxs\n\n\ndef zip_prop_count(zip_list, proxies, prp_list, ua, ezl):\n '''This will be used later to collect the number of properties per\n zip code. This will be necessary since the majority of the sites limit the\n number of properties between 350 and 500 per search. If we find the number\n of properties is more than the website will allow per search, we have to\n add an additional filter (max sqft, price, etc.) to narrow the results per\n search. This runs well. Not being used currently. This will be used to run\n feature importance when it comes to pricing homes.'''\n\n proxy = random.sample(proxies, 1)[0]\n\n print(proxies.index(proxy))\n print(proxy)\n\n for num in zip_list:\n\n url = 'https://www.redfin.com/zipcode/' + \\\n str(num) + '/filter/property-type=house+condo+townhouse,' + \\\n 'include=sold-1yr,min-price=20k,min-baths=1,include=sold-1yr'\n\n try:\n\n start_time = time.time()\n soup = session_creator(proxy, ua, url)\n\n print(num)\n print(len(zip_list))\n\n all_count = soup.findAll('div', {'class': 'homes summary'})\n\n if len(str(all_count)) >= 20:\n print(all_count)\n print(time.time() - start_time)\n ezl.append(num)\n prp_list.append(all_count)\n zip_list.remove(num)\n print(len(zip_list) + len(prp_list))\n\n else:\n print(\"Captcha!!!!!\")\n except:\n print(\"Skipping. Connnection error\")\n proxies.remove(proxy)\n print(len(proxies))\n return prp_list, zip_list, proxies, ezl\n\n return prp_list, zip_list, proxies, ezl\n\n\ndef each_page(proxy, ua, url):\n '''Once we start running the search, the search page displays only home's \n basic features. 
This function collects homes information (home addresses \n and home URLs) from the search result page.'''\n\n soup = session_creator(proxy, ua, url)\n\n # start_time = time.time()\n time.sleep(random.uniform(0, 1) * 3)\n # print(time.time() - start_time)\n\n full_soup = soup.findAll('a', {'class': 'bottom link-override'})\n\n full_address = [fas['title'] for fas in full_soup]\n\n home_link = ['https://www.redfin.com' +\n str(hls.get('href')) for hls in full_soup]\n\n df = {'full_address': full_address, 'home_link': home_link}\n\n return df\n\n\ndef links_for_props(proxies, url_list, main_df, ua):\n '''After collecting all the search page's URLs, this function runs each\n URL and parses all homes address and URL from every single search pages.'''\n\n proxy = random.sample(proxies, 1)[0]\n print(f'proxy number: {proxy}')\n\n i = randint(0, (len(url_list) // 2))\n print(f'starting from url number: {i}')\n\n while i < len(url_list):\n url = url_list[i]\n\n try:\n b = random.uniform(0.75, 2.25)\n time.sleep(b)\n # start_time = time.time()\n # print(f'total left: {len(url_list)}')\n\n data = each_page(proxy, ua, url)\n df = pd.DataFrame(data)\n eds = {'full_address': [], 'home_link': []}\n if data['full_address'] != eds['full_address']:\n main_df = pd.concat([main_df, df])\n url_list.pop(i)\n # a = (time.time() - start_time) * len(url_list)\n # print('SUCCESS!!')\n # print(f'Currently on url number: {i}')\n # print(f'time taken: {a/len(url_list)}')\n if i > 0:\n i -= randint(0, 1)\n else:\n i += 0\n else:\n # print('No results')\n # print(f'Currently on url number: {i}')\n i += randint(1, 5)\n except:\n # print(\"Skipping. Connnection error\")\n proxies.remove(proxy)\n # print(f'proxies left: {len(proxies)}')\n # print(f'total left: {len(url_list)}')\n\n return url_list, main_df, proxies\n\n return url_list, main_df, proxies\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
Sai-Venky/Trackjectory
[ "273eb623410d5150f71a8828febc7c8ff4002e13" ]
[ "src/dataset/mot_dataset.py" ]
[ "import glob\nimport math\nimport os\nimport os.path as osp\nimport random\nimport time\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport cv2\nimport json\nimport numpy as np\nimport torch\nimport copy\n\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import transforms as T\nfrom cython_bbox import bbox_overlaps as bbox_ious\nfrom utils.config import opt\nfrom dataset.util import gaussian_radius, draw_umich_gaussian\n\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nclass LoadImages: # for inference\n default_resolution = [640, 480]\n mean = None\n std = None\n num_classes = 1\n def __init__(self, path, img_size=(640, 480)):\n if os.path.isdir(path):\n image_format = ['.jpg', '.jpeg', '.png', '.tif']\n self.files = sorted(glob.glob('%s/*.*' % path))\n self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))\n elif os.path.isfile(path):\n self.files = [path]\n\n self.nF = len(self.files) # number of image files\n self.width = (img_size[0]//32) * 32\n self.height = (img_size[1]//32) * 32\n self.count = 0\n\n assert self.nF > 0, 'No images found in ' + path\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if self.count == self.nF:\n raise StopIteration\n img_path = self.files[self.count]\n\n # Read image\n img0 = cv2.imread(img_path) # BGR\n assert img0 is not None, 'Failed to load ' + img_path\n\n # Padded resize\n img, _, _, _ = letterbox(img0, height=self.height, width=self.width)\n\n # Normalize RGB\n img = img0[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n return img_path, img, img0\n\n def __getitem__(self, idx):\n idx = idx % self.nF\n img_path = self.files[idx]\n\n # Read image\n img0 = cv2.imread(img_path) # BGR\n assert img0 is not None, 'Failed to load ' + img_path\n\n # Padded resize\n img, _, _, _ = letterbox(img0, height=self.height, width=self.width)\n\n # Normalize RGB\n img = img0[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n return img_path, img, img0\n\n def __len__(self):\n return self.nF # number of files\n\n\nclass MotDataset: # for training\n default_resolution = [640, 480]\n mean = None\n std = None\n num_classes = 1\n\n def __len__(self):\n return self.nF # number of batches\n\n def __init__(self, opt, img_size=(640, 480), augment=False, transforms=None):\n self.opt = opt\n\n self.num_classes = 1\n self.fr = 1\n self.img_files = sorted(glob.glob('%s/*/*/*.png' % opt.multi_images_dataset))\n self.label_files = sorted(glob.glob('%s/*/*/*.txt' % opt.multi_labels_dataset))\n\n self.nID = int(1 + 1) # fot no\n self.nF = len(self.label_files)\n self.width = img_size[0]\n self.height = img_size[1]\n self.max_objs = opt.K\n self.augment = augment\n self.transforms = transforms\n\n def __getitem__(self, files_index):\n index = files_index\n img_path = self.img_files[files_index]\n label_path = self.label_files[files_index]\n\n imgs, labels, img_path, (input_h, input_w) = self.get_data(img_path, label_path)\n\n output_h = imgs.shape[1] // self.opt.down_ratio\n output_w = imgs.shape[2] // self.opt.down_ratio\n\n num_classes = self.num_classes\n num_objs = labels.shape[0]\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\n \n wh = np.zeros((self.max_objs, 4), dtype=np.float32)\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\n ind = np.zeros((self.max_objs, ), 
dtype=np.int64)\n reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8)\n ids = np.zeros((self.max_objs, ), dtype=np.int64)\n bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)\n\n draw_gaussian = draw_umich_gaussian\n for k in range(num_objs):\n label = labels[k]\n bbox = label[1:]\n cls_id = int(label[0])\n\n bbox[[0, 2]] = bbox[[0, 2]] * output_w\n bbox[[1, 3]] = bbox[[1, 3]] * output_h\n\n # x, y , w, h\n bbox_amodal = copy.deepcopy(bbox)\n bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.\n bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.\n bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]\n bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]\n # x1, y1, x2, y2\n # bbox[0] = np.clip(bbox[0], 0, output_w - 1)\n # bbox[1] = np.clip(bbox[1], 0, output_h - 1)\n\n h = bbox[3]\n w = bbox[2]\n\n bbox_xy = copy.deepcopy(bbox)\n bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2\n bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2\n bbox_xy[2] = bbox_xy[0] + bbox_xy[2]\n bbox_xy[3] = bbox_xy[1] + bbox_xy[3]\n # x1, y1, x2, y2\n\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n ct = np.array(\n [bbox[0], bbox[1]], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n draw_gaussian(hm[cls_id], ct_int, radius)\n\n wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \\\n bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]\n\n ind[k] = ct_int[1] * output_w + ct_int[0]\n reg[k] = ct - ct_int\n reg_mask[k] = 1\n ids[k] = 0\n bbox_xys[k] = bbox_xy\n\n ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids, 'bbox': bbox_xys}\n return ret\n \n def xyxy2xywh(self, x):\n # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]\n y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2\n y[:, 2] = x[:, 2] - x[:, 0]\n y[:, 3] = x[:, 3] - x[:, 1]\n return y\n \n def get_data(self, img_path, label_path):\n height = self.height\n width = self.width\n img = cv2.imread(img_path)\n\n h, w, _ = img.shape\n img, ratio, padw, padh = letterbox(img, height=height, width=width)\n\n # Load labels\n if os.path.isfile(label_path):\n labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 5)\n labels = labels0.copy()\n labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw\n labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh\n labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw\n labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh\n\n img, labels, M = random_affine(img, self.fr, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))\n self.fr +=1\n img00 = img\n if self.transforms is not None:\n img = self.transforms(img)\n nL = len(labels)\n\n if nL > 0:\n labels[:, 1:5] = self.xyxy2xywh(labels[:, 1:5].copy())\n labels[:, 1] /= width\n labels[:, 2] /= height\n labels[:, 3] /= width\n labels[:, 4] /= height\n return img, labels, img_path, (h, w)\n\n\ndef letterbox(img, height=608, width=1088,\n color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular\n shape = img.shape[:2] # shape = [height, width]\n ratio = min(float(height) / shape[0], float(width) / shape[1])\n new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]\n dw = (width - new_shape[0]) / 2 # width padding\n dh = (height - new_shape[1]) / 2 # height padding\n top, bottom = round(dh - 0.1), round(dh + 0.1)\n left, 
right = round(dw - 0.1), round(dw + 0.1)\n img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular\n return img, ratio, dw, dh\n\ndef random_affine(img, frame, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2), borderValue=(127.5, 127.5, 127.5)):\n\n border = 0 # width of added border (optional)\n height = img.shape[0]\n width = img.shape[1]\n \n\n # Rotation and Scale\n R = np.eye(3)\n a = random.random() * (degrees[1] - degrees[0]) + degrees[0]\n # a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations\n s = random.random() * (scale[1] - scale[0]) + scale[0]\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)\n T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)\n\n M = S @ T @ R \n imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,\n borderValue=borderValue)\n\n if targets is not None:\n if len(targets) > 0:\n n = targets.shape[0]\n points = targets[:, 1:5].copy()\n area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])\n\n # warp points\n xy = np.ones((n * 4, 3))\n xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = (xy @ M.T)[:, :2].reshape(n, 8)\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # apply angle-based reduction\n radians = a * math.pi / 180\n reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5\n x = (xy[:, 2] + xy[:, 0]) / 2\n y = (xy[:, 3] + xy[:, 1]) / 2\n w = (xy[:, 2] - xy[:, 0]) * reduction\n h = (xy[:, 3] - xy[:, 1]) * reduction\n xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T\n\n w = xy[:, 2] - xy[:, 0]\n h = xy[:, 3] - xy[:, 1]\n area = w * h\n ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))\n i = (w > 0) & (h > 0) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)\n\n targets = targets[i]\n targets[:, 1:5] = xy[i]\n vis(imw, str(frame) + '.png', xy[:, 0], xy[:, 1], xy[:, 2] - xy[:, 0], xy[:, 3] - xy[:, 1])\n frame +=1\n return imw, targets, M\n else:\n return imw\n\n\ndef vis(img, nm, lowerx = None, lowery = None, bbox_width = None, bbox_height = None):\n fig,ax = plt.subplots(1)\n ax.imshow(img)\n if lowerx:\n for i in range(lowerx.shape[0]):\n if i > 4:\n continue\n rect = patches.Rectangle((lowerx[i],lowery[i]),bbox_width[i],bbox_height[i],linewidth=1,edgecolor='y',facecolor='none')\n ax.add_patch(rect)\n plt.savefig(nm)" ]
[ [ "numpy.maximum", "torch.zeros", "numpy.ascontiguousarray", "numpy.eye", "matplotlib.patches.Rectangle", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.loadtxt" ] ]
FTC-12586/Tensor-Flow-Machine-Learning
[ "b32be8b49b9364dccc6b213c3f0720314c8d103f" ]
[ "src/learning_rate.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\n\n\nclass LearningRateScheduler(keras.callbacks.Callback):\n \"\"\"Learning rate scheduler which sets the learning rate according to schedule.\n\n Arguments:\n schedule: a function that takes an epoch index\n (integer, indexed from 0) and current learning rate\n as inputs and returns a new learning rate as output (float).\n \"\"\"\n\n def __init__(self, schedule):\n super(LearningRateScheduler, self).__init__()\n self.schedule = schedule\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, \"lr\"):\n raise ValueError('Optimizer must have a \"lr\" attribute.')\n # Get the current learning rate from model's optimizer.\n lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))\n # Call schedule function to get the scheduled learning rate.\n scheduled_lr = self.schedule(epoch, lr)\n # Set the value back to the optimizer before this epoch starts\n tf.keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)\n print(\"\\nEpoch %05d: Learning rate is %6.4f.\" % (epoch, scheduled_lr))\n" ]
[ [ "tensorflow.keras.backend.get_value", "tensorflow.keras.backend.set_value" ] ]
mnansary/banglaOCR
[ "20d48810eee8e1a6d47d60e716764c32a1c6df6f" ]
[ "coreLib/store.py" ]
[ "#-*- coding: utf-8 -*-\n\"\"\"\n@author:MD.Nazmuddoha Ansary\n\"\"\"\nfrom __future__ import print_function\n# ---------------------------------------------------------\n# imports\n# ---------------------------------------------------------\n\nimport os\nimport tensorflow as tf \nfrom tqdm import tqdm\nimport numpy as np\n# ---------------------------------------------------------\n# globals\n# ---------------------------------------------------------\n# number of images to store in a tfrecord\nDATA_NUM = 1024\n\n#---------------------------------------------------------------\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.flatten()))\ndef _int64_list_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\ndef to_tfrecord(df,save_dir,r_num):\n '''\t \n Creates tfrecords from Provided Image Paths\t \n args:\t \n df : dataframe that contains img_path,glabel\t \n save_dir : location to save the tfrecords\t \n r_num : record number\t\n '''\n # record name\n tfrecord_name='{}.tfrecord'.format(r_num)\n # path\n tfrecord_path=os.path.join(save_dir,tfrecord_name)\n with tf.io.TFRecordWriter(tfrecord_path) as writer: \n for idx in range(len(df)):\n image_path=df.iloc[idx,0]\n glabel =df.iloc[idx,1]\n \n target_path=str(image_path).replace('images','targets')\n map_path =str(image_path).replace('images','maps').replace(\".png\",\".npy\")\n #image\n with(open(image_path,'rb')) as fid:\n image_bytes=fid.read()\n # target\n with(open(target_path,'rb')) as fid:\n target_bytes=fid.read()\n # map\n _map_data=np.load(map_path)\n _map_data=_map_data.astype(\"int\")\n \n \n data ={ 'image' :_bytes_feature(image_bytes),\n 'target' :_bytes_feature(target_bytes),\n 'seg' :_int64_feature(_map_data),\n 'glabel' :_int64_list_feature(glabel)\n }\n # write\n features=tf.train.Features(feature=data)\n example= tf.train.Example(features=features)\n serialized=example.SerializeToString()\n writer.write(serialized)\n\n\ndef genTFRecords(df,mode_dir):\n '''\t \n tf record wrapper\n args:\t \n df : dataframe that contains \"img_path\" and \"glabel\"\t \n mode_dir : location to save the tfrecords\t \n '''\n for i in tqdm(range(0,len(df),DATA_NUM)):\n \n _df=df.iloc[i:i+DATA_NUM]\n # record num\n r_num=i // DATA_NUM\n # create tfrecord\n to_tfrecord(_df,mode_dir,r_num) " ]
[ [ "tensorflow.io.TFRecordWriter", "tensorflow.train.Example", "tensorflow.train.Features", "tensorflow.train.BytesList", "numpy.load", "tensorflow.train.Int64List" ] ]
miramirakim227/SwapNeRF_GT
[ "84444660a7fc8b5f796503d90f3a055889c44389" ]
[ "src/model/layers.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nfrom kornia.filters import filter2D\nimport torch\n\n\n# Resnet Blocks\nclass ResnetBlockFC(nn.Module):\n ''' Fully connected ResNet Block class.\n\n Args:\n size_in (int): input dimension\n size_out (int): output dimension\n size_h (int): hidden dimension\n '''\n\n def __init__(self, size_in, size_out=None, size_h=None):\n super().__init__()\n # Attributes\n if size_out is None:\n size_out = size_in\n\n if size_h is None:\n size_h = min(size_in, size_out)\n\n self.size_in = size_in\n self.size_h = size_h\n self.size_out = size_out\n # Submodules\n self.fc_0 = nn.Linear(size_in, size_h)\n self.fc_1 = nn.Linear(size_h, size_out)\n self.actvn = nn.ReLU()\n\n print(f'pass here?')\n\n if size_in == size_out:\n self.shortcut = None\n else:\n self.shortcut = nn.Linear(size_in, size_out, bias=False)\n # Initialization\n nn.init.zeros_(self.fc_1.weight)\n\n def forward(self, x):\n net = self.fc_0(self.actvn(x))\n dx = self.fc_1(self.actvn(net))\n\n if self.shortcut is not None:\n x_s = self.shortcut(x)\n else:\n x_s = x\n\n return x_s + dx\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, fin, fout, fhidden=None, is_bias=True):\n super().__init__()\n # Attributes\n self.is_bias = is_bias\n self.learned_shortcut = (fin != fout)\n self.fin = fin\n self.fout = fout\n if fhidden is None:\n self.fhidden = min(fin, fout)\n else:\n self.fhidden = fhidden\n\n # Submodules\n self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1)\n self.conv_1 = nn.Conv2d(self.fhidden, self.fout,\n 3, stride=1, padding=1, bias=is_bias)\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(\n self.fin, self.fout, 1, stride=1, padding=0, bias=False)\n\n def actvn(self, x):\n out = F.leaky_relu(x, 2e-1)\n return out\n\n def forward(self, x):\n x_s = self._shortcut(x)\n dx = self.conv_0(self.actvn(x))\n dx = self.conv_1(self.actvn(dx))\n out = x_s + 0.1*dx\n\n return out\n\n def _shortcut(self, x):\n if self.learned_shortcut:\n x_s = self.conv_s(x)\n else:\n x_s = x\n return x_s\n\n\nclass Blur(nn.Module):\n def __init__(self):\n super().__init__()\n f = torch.Tensor([1, 2, 1])\n self.register_buffer('f', f)\n\n def forward(self, x):\n f = self.f\n f = f[None, None, :] * f[None, :, None]\n return filter2D(x, f, normalized=True)\n" ]
[ [ "torch.Tensor", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.leaky_relu", "torch.nn.init.zeros_", "torch.nn.ReLU" ] ]
yusuf-onur/erp-captcha-cracker
[ "2e05a6eb9bd3b3a925709799b3912441a233d620" ]
[ "cracker/extract_digits.py" ]
[ "from utils import *\n\nfrom PIL import Image\nimport numpy as np\nfrom scipy.ndimage.measurements import label\n\ndef get_digits(img, gray_thres=130, size_thres=20):\n\ta = np.asarray(img).copy()\n\ta = np.delete(a, 3, 2) # remove alpha channel\n\n\tmask = a[:, :, 0] >= gray_thres\n\ta[mask, :] = 255 # remove gridline + gray dots\n\ta[find_holes(mask)] = [25, 151, 87] # fill holes\n\t\n\t# turn into grayscale\n\ta = 255 - np.mean(a, axis=2) > 0\n\n\tsave_img(a, \"pre\")\n\n\t# a = remove_spots(a, size_thres)\n\tlbl, cnt = label(a) # find connected components using scipy\n\ta = lbl\n\n\tdigits = []\n\n\tfor i in range(1, cnt+1):\n\t\tcur = a == i\n\t\tif np.sum(cur) < size_thres: a[cur] = 0\n\t\telse:\n\t\t\tmnv, mnh = np.argmax(cur, axis=0), np.argmax(cur, axis=1)\n\t\t\ty, x = np.min(mnv[mnv>0]), np.min(mnh[mnh>0])\n\t\t\tcur = np.roll(cur, -y, axis=0)\n\t\t\tcur = np.roll(cur, -x, axis=1)\n\t\t\tcur = cur[:30,:30]\n\t\t\tdigits.append((x, cur))\n\n\tres = []\n\tfor (x, d), i in zip(sorted(digits), range(6)):\n\t\tres.append(d.flatten())\n\t\tsave_img(d, f\"{i}\")\n\tsave_img(a>0, \"result\")\n\n\treturn res\n\n# super jank but works\ndef find_holes(mask):\n\tres = mask\n\tmask = ~mask\n\n\tmask = np.roll(mask, 1, axis=0)\n\tres = res & mask\n\tmask = np.roll(mask, -2, axis=0)\n\tres = res & mask\n\tmask = np.roll(mask, 1, axis=0)\n\n\tmask = np.roll(mask, 1, axis=1)\n\tres = res & mask\n\tmask = np.roll(mask, -2, axis=1)\n\tres = res & mask\n\tmask = np.roll(mask, 1, axis=1)\n\n\treturn res\n\n# not reliable enough - still leaves some spots - also jank\ndef remove_spots(a, threshold):\n\t# vertical distances\n\tzeros = a == 0\n\tidx = np.repeat(np.arange(a.shape[0])[:, np.newaxis], a.shape[1], axis=1)\n\n\tdown = idx - np.maximum.accumulate(idx*zeros)\n\tdown *= ~zeros\n\n\tzeros = zeros[::-1]\n\tup = idx - np.maximum.accumulate(idx*zeros)\n\tup = up[::-1] * ~zeros[::-1]\n\n\t# horizontal distances\n\tzeros = a == 0\n\tidx = np.repeat(np.arange(a.shape[1])[:, np.newaxis].T, a.shape[0], axis=0)\n\n\tright = idx - np.maximum.accumulate(idx*zeros, axis=1)\n\tright *= ~zeros\n\n\tzeros = zeros[:, ::-1]\n\tleft = idx - np.maximum.accumulate(idx*zeros, axis=1)\n\tleft = left[:, ::-1] * ~zeros[:, ::-1]\n\n\treturn (up + down + left + right) >= threshold" ]
[ [ "numpy.sum", "numpy.min", "numpy.asarray", "numpy.arange", "scipy.ndimage.measurements.label", "numpy.delete", "numpy.argmax", "numpy.mean", "numpy.maximum.accumulate", "numpy.roll" ] ]
saraswat/munkhdalai-nse
[ "11264ea0c88039cbdca08fc72dbd8620074df120" ]
[ "snli/NSE_MMA_attention.py" ]
[ "import math\nimport sys\nimport time\nimport copy\nimport numpy as np\nimport six\nfrom chainer import cuda, Variable, FunctionSet, optimizers\nimport chainer.functions as F\nimport chainer.links as L\nimport chainer\n\nclass NSE_MMA_attention(chainer.Chain):\n\n\t\"\"\"docstring for NSE_MMA_attention\"\"\"\n\tdef __init__(self, n_units, gpu):\n\t\tsuper(NSE_MMA_attention, self).__init__(\n\t\t\tcompose_l1 = F.Linear(2 * n_units, 2 * n_units),\n\t\t\tread_lstm = L.LSTM(n_units, n_units),\n\t\t\twrite_lstm = L.LSTM(2 * n_units, n_units),\n\t\t\tcompose2_l1 = F.Linear(3 * n_units, 3 * n_units),\n\t\t\tread2_lstm = L.LSTM(n_units, n_units),\n\t\t\twrite2_lstm = L.LSTM(3 * n_units, n_units),\n\t\t\tw_ap = F.Linear(n_units, n_units),\n\t\t\tw_aw = F.Linear(n_units, n_units),\n\t\t\tw_we = F.Linear(n_units, 1),\n\t\t\th_l1 = F.Linear(4 * n_units, 1024),\n\t\t\tl_y = F.Linear(1024, 3))\n\t\tself.__n_units = n_units\n\t\tself.__gpu = gpu\n\t\tself.__mod = cuda.cupy if gpu >= 0 else np\n\t\tfor param in self.params():\n\t\t\tdata = param.data\n\t\t\tdata[:] = np.random.uniform(-0.1, 0.1, data.shape)\n\t\tif gpu >= 0:\n\t\t\tcuda.get_device(gpu).use()\n\t\t\tself.to_gpu()\n\n\tdef init_optimizer(self):\n\t\tself.__opt = optimizers.Adam(alpha=0.0003, beta1=0.9, beta2=0.999, eps=1e-08)\n\t\tself.__opt.setup(self)\n\t\tself.__opt.add_hook(chainer.optimizer.GradientClipping(15))\n\t\tself.__opt.add_hook(chainer.optimizer.WeightDecay(0.00003))\n\n\tdef save(self, filename):\n\t\tchainer.serializers.save_npz(filename, self)\n\n\t@staticmethod\n\tdef load(filename, n_units, gpu):\n\t\tself = NSE_MMA_attention(n_units, gpu)\n\t\tchainer.serializers.load_npz(filename, self)\n\t\treturn self\n\n\tdef reset_state(self):\n\t\tself.read_lstm.reset_state()\n\t\tself.write_lstm.reset_state()\n\t\tself.read2_lstm.reset_state()\n\t\tself.write2_lstm.reset_state()\n\n\tdef __attend(self, hs, q, batch_size, train):\n\t\tn_units = self.__n_units\n\t\tmod = self.__mod\n\t\t\n\t\t# calculate attention weights\n\t\tx_len = len(hs)\n\t\tlist_e = []\n\t\tsum_e = Variable(mod.zeros((batch_size, 1), dtype=np.float32), volatile=not train)\n\t\tfor l in range(x_len):\n\t\t\ts_w = F.relu(self.w_aw(hs[l]) + self.w_ap(q))\n\t\t\tr_e = F.exp(self.w_we(s_w))\n\t\t\tlist_e.append(r_e)\n\t\t\tsum_e += r_e\n\n\t\t# make attention vector\n\t\ts_c = Variable(mod.zeros((batch_size, n_units), dtype=np.float32), volatile=not train)\n\t\tlist_en = []\n\t\tfor l in range(x_len):\n\t\t\ts_e = list_e[l] / sum_e\n\t\t\tlist_en.append(s_e)\n\t\t\ts_c += F.reshape(F.batch_matmul(hs[l], s_e), (batch_size, n_units))\n\n\t\treturn s_c\n\n\tdef read(self, M_t, x_t, batch_size, train):\n\t\t\"\"\"\n\t\tThe NSE read operation: Eq. 1-3 in the paper\n\t\t\"\"\"\n\n\t\to_t = self.read_lstm(F.dropout(x_t, ratio=0.3, train=train))\n\t\tz_t = F.softmax(F.reshape(F.batch_matmul(M_t, o_t), (batch_size, -1)))\n\t\tm_t = F.reshape(F.batch_matmul(z_t, M_t, transa=True), (batch_size, -1))\n\t\treturn o_t, m_t, z_t\n\n\tdef compose(self, o_t, m_t, train):\n\t\t\"\"\"\n\t\tThe NSE compose operation: Eq. 4\n\t\tThis could be any DNN. Also we could rather compose x_t and m_t. But that is a detail.\n\t\t\"\"\"\n\n\t\tc_t = self.compose_l1(F.concat([o_t, m_t], axis=1))\n\t\treturn c_t\n\n\tdef write(self, M_t, c_t, z_t, full_shape, train):\n\t\t\"\"\"\n\t\tThe NSE write operation: Eq. 5 and 6. Here we can write back c_t instead. 
You could try :)\n\t\t\"\"\"\n\n\t\th_t = self.write_lstm(F.dropout(c_t, ratio=0.3, train=train))\n\t\tM_t = F.broadcast_to(F.reshape((1 - z_t), (full_shape[0], full_shape[1], 1)), full_shape) * M_t\n\t\tM_t += F.broadcast_to(F.reshape(z_t, (full_shape[0], full_shape[1], 1)), full_shape)*F.broadcast_to(F.reshape(h_t, (full_shape[0], 1, full_shape[2])), full_shape)\n\t\treturn M_t, h_t\n\n\tdef read2(self, M_t, M2_t, x_t, batch_size, train):\n\t\t\n\t\to_t = self.read2_lstm(F.dropout(x_t, ratio=0.3, train=train))\n\t\tz_t = F.softmax(F.reshape(F.batch_matmul(M_t, o_t), (batch_size, -1)))\n\t\tm_t = F.reshape(F.batch_matmul(z_t, M_t, transa=True), (batch_size, -1))\n\t\tz2_t = F.softmax(F.reshape(F.batch_matmul(M2_t, o_t), (batch_size, -1)))\n\t\tm2_t = F.reshape(F.batch_matmul(z2_t, M2_t, transa=True), (batch_size, -1))\n\t\treturn o_t, m_t, z_t, m2_t, z2_t\n\n\tdef compose2(self, o_t, m_t, m2_t, train):\n\t\t\n\t\tc_t = self.compose2_l1(F.concat([o_t, m2_t, m_t], axis=1))\n\t\treturn c_t\n\n\tdef write2(self, M_t, M2_t, c_t, z_t, z2_t, full_shape, train):\n\t\t\n\t\th_t = self.write2_lstm(F.dropout(c_t, ratio=0.3, train=train))\n\t\tM_t = F.broadcast_to(F.reshape((1 - z_t), (full_shape[0], full_shape[1], 1)), full_shape) * M_t\n\t\tM_t += F.broadcast_to(F.reshape(z_t, (full_shape[0], full_shape[1], 1)), full_shape)*F.broadcast_to(F.reshape(h_t, (full_shape[0], 1, full_shape[2])), full_shape)\n\t\tM2_t = F.broadcast_to(F.reshape((1 - z2_t), (full_shape[0], full_shape[1], 1)), full_shape) * M2_t\n\t\tM2_t += F.broadcast_to(F.reshape(z2_t, (full_shape[0], full_shape[1], 1)), full_shape)*F.broadcast_to(F.reshape(h_t, (full_shape[0], 1, full_shape[2])), full_shape)\n\t\treturn M_t, M2_t, h_t\n\n\tdef __forward(self, train, a_batch, q_batch, y_batch = None):\n\t\tn_units = self.__n_units\n\t\tmod = self.__mod\n\t\tgpu = self.__gpu\n\t\tbatch_size = len(a_batch)\n\t\tx_len = len(a_batch[0])\n\t\t\n\t\tif gpu >=0:\n\t\t\ta_batch = [[mod.array(e) for e in row] for row in a_batch]\n\t\t\n\t\tself.reset_state()\n\n\t\tx_data = mod.concatenate([mod.transpose(mod.concatenate(a_batch[b], axis=0)).reshape((1,n_units,1,x_len)) for b in range(batch_size)], axis=0)\n\t\tx = Variable(x_data, volatile=not train)\n\t\tx = F.reshape(x, (batch_size,n_units,x_len))\n\t\tM_t = F.swapaxes(x, 1, 2)\n\t\t\n\t\tlist_h = []\n\t\tfull_shape = (batch_size, x_len, n_units)\n\t\tfor l in range(x_len):\n\t\t\tx_t = []\n\t\t\tfor b in range(batch_size):\n\t\t\t\tx = a_batch[b][l]\n\t\t\t\tx_t.append(x)\n\t\t\tx_t = Variable(mod.concatenate(x_t, axis=0), volatile=not train)\n\t\t\to_t, m_t, z_t = self.read(M_t, x_t, batch_size, train)\n\t\t\tc_t = self.compose(o_t, m_t, train)\n\t\t\tM_t, h_t = self.write(M_t, c_t, z_t, full_shape, train)\n\t\t\tlist_h.append(h_t)\n\n\t\tif gpu >=0:\n\t\t\tq_batch = [[mod.array(e) for e in row] for row in q_batch]\n\n\t\tx_data = mod.concatenate([mod.transpose(mod.concatenate(q_batch[b], axis=0)).reshape((1,n_units,1,x_len)) for b in range(batch_size)], axis=0)\n\t\tx = Variable(x_data, volatile=not train)\n\t\tx = F.reshape(x, (batch_size,n_units,x_len))\n\t\tM2_t = F.swapaxes(x, 1, 2)\n\t\t\n\t\tfor l in range(x_len):\n\t\t\tx_t = []\n\t\t\tfor b in range(batch_size):\n\t\t\t\tx = q_batch[b][l]\n\t\t\t\tx_t.append(x)\n\t\t\tx_t = Variable(mod.concatenate(x_t, axis=0), volatile=not train)\n\t\t\to_t, m_t, z_t, m2_t, z2_t = self.read2(M_t, M2_t, x_t, batch_size, train)\n\t\t\tc_t = self.compose2(o_t, m_t, m2_t, train)\n\t\t\tM_t, M2_t, h2_t = self.write2(M_t, M2_t, c_t, z_t, z2_t, full_shape, 
train)\n\t\t\n\t\th_t = self.__attend(list_h, h2_t, batch_size, train)\n\t\ths = F.concat([F.concat([h_t, h2_t], axis=1), h_t-h2_t, h_t*h2_t], axis=1)\n\t\ths = F.relu(self.h_l1(hs))\n\t\ty = self.l_y(F.dropout(hs, ratio=0.3, train=train))\n\t\tpreds = mod.argmax(y.data, 1).tolist()\n\n\t\taccum_loss = 0 if train else None\n\t\tif train:\n\t\t\tif gpu >= 0:\n\t\t\t\ty_batch = cuda.to_gpu(y_batch)\n\t\t\tlbl = Variable(y_batch, volatile=not train)\n\t\t\taccum_loss = F.softmax_cross_entropy(y, lbl)\n\t\t\n\t\treturn preds, accum_loss\n\n\tdef train(self, a_batch, q_batch, y_batch):\n\t\tself.__opt.zero_grads()\n\t\tpreds, accum_loss = self.__forward(True, a_batch, q_batch, y_batch=y_batch)\n\t\taccum_loss.backward()\n\t\tself.__opt.update()\n\t\treturn preds, accum_loss\n\n\tdef predict(self, a_batch, q_batch):\n\t\treturn self.__forward(False, a_batch, q_batch)[0]" ]
[ [ "numpy.random.uniform" ] ]
renyi533/ranking
[ "6cf8f70a8533ba15abbfb5f50db17cb01fc56410" ]
[ "tensorflow_ranking/python/keras/feature_test.py" ]
[ "# Copyright 2021 The TensorFlow Ranking Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Test for Keras feature transformations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_ranking.python.keras import feature\n\n\ndef _get_feature_columns():\n\n def _normalizer_fn(t):\n return tf.math.log1p(t * tf.sign(t)) * tf.sign(t)\n\n context_feature_columns = {\n 'query_length':\n tf.feature_column.numeric_column(\n 'query_length', shape=(1,), default_value=0, dtype=tf.int64)\n }\n example_feature_columns = {\n 'utility':\n tf.feature_column.numeric_column(\n 'utility',\n shape=(1,),\n default_value=0.0,\n dtype=tf.float32,\n normalizer_fn=_normalizer_fn),\n 'unigrams':\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_vocabulary_list(\n 'unigrams',\n vocabulary_list=[\n 'ranking', 'regression', 'classification', 'ordinal'\n ]),\n dimension=10)\n }\n custom_objects = {'_normalizer_fn': _normalizer_fn}\n return context_feature_columns, example_feature_columns, custom_objects\n\n\ndef _features():\n return {\n 'query_length':\n tf.convert_to_tensor(value=[[1], [2]]),\n 'utility':\n tf.convert_to_tensor(value=[[[1.0], [0.0]], [[0.0], [1.0]]]),\n 'unigrams':\n tf.SparseTensor(\n indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]],\n values=['ranking', 'regression', 'classification', 'ordinal'],\n dense_shape=[2, 2, 1]),\n 'example_feature_size':\n tf.convert_to_tensor(value=[1, 2])\n }\n\n\ndef _clone_keras_obj(obj, custom_objects=None):\n return obj.__class__.from_config(\n obj.get_config(), custom_objects=custom_objects)\n\n\nclass KerasInputsTest(tf.test.TestCase):\n\n def setUp(self):\n super(KerasInputsTest, self).setUp()\n (context_feature_columns, example_feature_columns,\n _) = _get_feature_columns()\n self._context_feature_columns = context_feature_columns\n self._example_feature_columns = example_feature_columns\n\n def test_keras_inputs_dynamic_list_shape(self):\n keras_inputs = feature.create_keras_inputs(\n context_feature_columns=self._context_feature_columns,\n example_feature_columns=self._example_feature_columns,\n size_feature_name=None)\n\n self.assertEqual(keras_inputs['query_length'].shape.as_list(), [None, 1])\n self.assertEqual(keras_inputs['query_length'].dtype, tf.int64)\n\n self.assertEqual(keras_inputs['utility'].shape.as_list(), [None, None, 1])\n self.assertEqual(keras_inputs['utility'].dtype, tf.float32)\n\n self.assertEqual(keras_inputs['unigrams'].dtype, tf.string)\n\n\nclass EncodeListwiseFeaturesTest(tf.test.TestCase):\n\n def setUp(self):\n super(EncodeListwiseFeaturesTest, self).setUp()\n (context_feature_columns, example_feature_columns,\n custom_objects) = _get_feature_columns()\n self._context_feature_columns = context_feature_columns\n self._example_feature_columns = example_feature_columns\n self._custom_objects = custom_objects\n\n # Batch size = 
2, list_size = 2.\n self._features = _features()\n self._listwise_dense_layer = feature.EncodeListwiseFeatures(\n context_feature_columns=self._context_feature_columns,\n example_feature_columns=self._example_feature_columns)\n\n def test_get_config(self):\n # Check save and restore config.\n restored_layer = _clone_keras_obj(\n self._listwise_dense_layer, custom_objects=self._custom_objects)\n self.assertEqual(\n restored_layer._context_feature_columns['query_length'].get_config(),\n self._context_feature_columns['query_length'].get_config())\n self.assertEqual(\n restored_layer._example_feature_columns['utility'].get_config(),\n self._example_feature_columns['utility'].get_config())\n self.assertEqual(\n restored_layer._example_feature_columns['unigrams'].get_config(),\n self._example_feature_columns['unigrams'].get_config())\n\n def test_listwise_dense_layer(self):\n context_features, example_features = self._listwise_dense_layer(\n inputs=self._features, training=False)\n self.assertAllInSet(['query_length'], set(six.iterkeys(context_features)))\n self.assertAllInSet(['unigrams', 'utility'],\n set(six.iterkeys(example_features)))\n self.assertAllEqual(example_features['unigrams'].get_shape().as_list(),\n [2, 2, 10])\n self.assertAllEqual(context_features['query_length'], [[1], [2]])\n self.assertAllEqual(\n example_features['utility'],\n [[[tf.math.log1p(1.0)], [0.0]], [[0.0], [tf.math.log1p(1.0)]]])\n\n def test_create_keras_inputs(self):\n keras_inputs = feature.create_keras_inputs(\n context_feature_columns=self._context_feature_columns,\n example_feature_columns=self._example_feature_columns,\n size_feature_name='example_list_size')\n\n self.assertCountEqual(\n keras_inputs.keys(),\n list(self._context_feature_columns.keys()) +\n list(self._example_feature_columns.keys()) + ['example_list_size'])\n\n def test_create_keras_inputs_sparse_features(self):\n context_feature_columns = {\n 'query':\n tf.feature_column.categorical_column_with_vocabulary_list(\n 'query',\n vocabulary_list=[\n 'ranking', 'regression', 'classification', 'ordinal'\n ])\n }\n example_feature_columns = {\n 'title':\n tf.feature_column.categorical_column_with_vocabulary_list(\n 'title',\n vocabulary_list=[\n 'ranking', 'regression', 'classification', 'ordinal'\n ])\n }\n keras_inputs = feature.create_keras_inputs(\n context_feature_columns=context_feature_columns,\n example_feature_columns=example_feature_columns,\n size_feature_name='example_list_size')\n\n self.assertCountEqual(\n keras_inputs.keys(),\n list(context_feature_columns.keys()) +\n list(example_feature_columns.keys()) + ['example_list_size'])\n\n\nclass GenerateMaskTest(tf.test.TestCase):\n\n def setUp(self):\n super(GenerateMaskTest, self).setUp()\n (context_feature_columns, example_feature_columns,\n custom_objects) = _get_feature_columns()\n self._context_feature_columns = context_feature_columns\n self._example_feature_columns = example_feature_columns\n self._custom_objects = custom_objects\n\n # Batch size = 2, list_size = 2.\n self._features = _features()\n self._mask_generator_layer = feature.GenerateMask(\n example_feature_columns=self._example_feature_columns,\n size_feature_name='example_feature_size')\n\n def test_get_config(self):\n # Check save and restore config.\n restored_layer = _clone_keras_obj(\n self._mask_generator_layer, custom_objects=self._custom_objects)\n self.assertEqual(\n restored_layer.example_feature_columns['utility'].get_config(),\n self._example_feature_columns['utility'].get_config())\n self.assertEqual(\n 
restored_layer.example_feature_columns['unigrams'].get_config(),\n self._example_feature_columns['unigrams'].get_config())\n\n def test_mask_generator_layer(self):\n mask = self._mask_generator_layer(inputs=self._features, training=False)\n expected_mask = [[True, False], [True, True]]\n self.assertAllEqual(expected_mask, mask)\n\n\nclass FeatureColumnSerializationTest(tf.test.TestCase):\n\n def setUp(self):\n super(FeatureColumnSerializationTest, self).setUp()\n (_, example_feature_columns, custom_objects) = _get_feature_columns()\n self._feature_columns = example_feature_columns\n self._custom_objects = custom_objects\n\n def test_deserialization(self):\n serialized_feature_columns = feature.serialize_feature_columns(\n self._feature_columns)\n restored_feature_columns = feature.deserialize_feature_columns(\n serialized_feature_columns, custom_objects=self._custom_objects)\n self.assertEqual(restored_feature_columns['utility'].get_config(),\n self._feature_columns['utility'].get_config())\n self.assertEqual(restored_feature_columns['unigrams'].get_config(),\n self._feature_columns['unigrams'].get_config())\n\n\nif __name__ == '__main__':\n tf.enable_v2_behavior()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.feature_column.categorical_column_with_vocabulary_list", "tensorflow.compat.v2.feature_column.numeric_column", "tensorflow.compat.v2.sign", "tensorflow.compat.v2.SparseTensor" ] ]
NetherlandsForensicInstitute/lir
[ "341b9db3153da0b00c7bfdae01b897f2850b4050" ]
[ "lir/_plotting_new.py" ]
[ "from contextlib import contextmanager\nfrom functools import partial\nimport logging\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .bayeserror import plot_nbe as nbe\nfrom .calibration import IsotonicCalibrator\nfrom .ece import plot_ece as ece\nfrom . import util\n\n\nLOG = logging.getLogger(__name__)\n\n\n# make matplotlib.pyplot behave more like axes objects\nplt.set_xlabel = plt.xlabel\nplt.set_ylabel = plt.ylabel\nplt.set_xlim = plt.xlim\nplt.set_ylim = plt.ylim\n\n\nclass Canvas:\n def __init__(self, ax):\n self.ax = ax\n\n self.calibrator_fit = partial(calibrator_fit, ax=ax)\n self.ece = partial(ece, ax=ax)\n self.lr_histogram = partial(lr_histogram, ax=ax)\n self.nbe = partial(nbe, ax=ax)\n self.pav = partial(pav, ax=ax)\n self.score_distribution = partial(score_distribution, ax=ax)\n self.tippett = partial(tippett, ax=ax)\n\n def __getattr__(self, attr):\n return getattr(self.ax, attr)\n\n\ndef savefig(path):\n \"\"\"\n Creates a plotting context, write plot when closed.\n\n Example\n -------\n ```py\n with savefig(filename) as ax:\n ax.pav(lrs, y)\n ```\n\n A call to `savefig(path)` is identical to `axes(savefig=path)`.\n\n Parameters\n ----------\n path : str\n write a PNG image to this path\n \"\"\"\n return axes(savefig=path)\n\n\ndef show():\n \"\"\"\n Creates a plotting context, show plot when closed.\n\n Example\n -------\n ```py\n with show() as ax:\n ax.pav(lrs, y)\n ```\n\n A call to `show()` is identical to `axes(show=True)`.\n \"\"\"\n return axes(show=True)\n\n\n@contextmanager\ndef axes(savefig=None, show=None):\n \"\"\"\n Creates a plotting context.\n\n Example\n -------\n ```py\n with axes() as ax:\n ax.pav(lrs, y)\n ```\n \"\"\"\n fig = plt.figure()\n try:\n yield Canvas(ax=plt)\n finally:\n if savefig:\n fig.savefig(savefig)\n if show:\n plt.show()\n plt.close(fig)\n\n\ndef pav(lrs, y, add_misleading=0, show_scatter=True, ax=plt):\n \"\"\"\n Generates a plot of pre- versus post-calibrated LRs using Pool Adjacent\n Violators (PAV).\n\n Parameters\n ----------\n lrs : numpy array of floats\n Likelihood ratios before PAV transform\n y : numpy array\n Labels corresponding to lrs (0 for Hd and 1 for Hp)\n add_misleading : int\n number of misleading evidence points to add on both sides (default: `0`)\n show_scatter : boolean\n If True, show individual LRs (default: `True`)\n ax : pyplot axes object\n defaults to `matplotlib.pyplot`\n ----------\n \"\"\"\n pav = IsotonicCalibrator(add_misleading=add_misleading)\n pav_lrs = pav.fit_transform(lrs, y)\n\n with np.errstate(divide='ignore'):\n llrs = np.log10(lrs)\n pav_llrs = np.log10(pav_lrs)\n\n xrange = yrange = [llrs[llrs != -np.Inf].min() - .5, llrs[llrs != np.Inf].max() + .5]\n\n # plot line through origin\n ax.plot(xrange, yrange)\n\n # line pre pav llrs x and post pav llrs y\n line_x = np.arange(*xrange, .01)\n with np.errstate(divide='ignore'):\n line_y = np.log10(pav.transform(10 ** line_x))\n\n # filter nan values, happens when values are out of bound (x_values out of training domain for pav)\n # see: https://scikit-learn.org/stable/modules/generated/sklearn.isotonic.IsotonicRegression.html\n line_x, line_y = line_x[~np.isnan(line_y)], line_y[~np.isnan(line_y)]\n\n # some values of line_y go beyond the yrange which is problematic when there are infinite values\n mask_out_of_range = np.logical_and(line_y >= yrange[0], line_y <= yrange[1])\n ax.plot(line_x[mask_out_of_range], line_y[mask_out_of_range])\n\n # add points for infinite values\n if np.logical_or(np.isinf(pav_llrs), 
np.isinf(llrs)).any():\n\n def adjust_ticks_labels_and_range(neg_inf, pos_inf, axis_range):\n ticks = np.linspace(axis_range[0], axis_range[1], 6).tolist()\n tick_labels = [str(round(tick, 1)) for tick in ticks]\n step_size = ticks[2] - ticks[1]\n\n axis_range = [axis_range[0] - (step_size * neg_inf),axis_range[1] + (step_size * pos_inf)]\n ticks = [axis_range[0]] * neg_inf + ticks + [axis_range[1]] * pos_inf\n tick_labels = ['-∞'] * neg_inf + tick_labels + ['+∞'] * pos_inf\n\n return axis_range, ticks, tick_labels\n\n def replace_values_out_of_range(values, min_range, max_range):\n # create margin for point so no overlap with axis line\n margin = (max_range - min_range) / 60\n return np.concatenate([np.where(np.isneginf(values), min_range + margin, values),\n np.where(np.isposinf(values), max_range - margin, values)])\n\n yrange, ticks_y, tick_labels_y = adjust_ticks_labels_and_range(np.isneginf(pav_llrs).any(),\n np.isposinf(pav_llrs).any(),\n yrange)\n xrange, ticks_x, tick_labels_x = adjust_ticks_labels_and_range(np.isneginf(llrs).any(),\n np.isposinf(llrs).any(),\n xrange)\n\n mask_not_inf = np.logical_or(np.isinf(llrs), np.isinf(pav_llrs))\n x_inf = replace_values_out_of_range(llrs[mask_not_inf], xrange[0], xrange[1])\n y_inf = replace_values_out_of_range(pav_llrs[mask_not_inf], yrange[0], yrange[1])\n\n ax.yticks(ticks_y, tick_labels_y)\n ax.xticks(ticks_x, tick_labels_x)\n\n ax.scatter(x_inf,\n y_inf, facecolors='none', edgecolors='#1f77b4', linestyle=':')\n\n ax.axis(xrange + yrange)\n # pre-/post-calibrated lr fit\n\n if show_scatter:\n ax.scatter(llrs, pav_llrs) # scatter plot of measured lrs\n\n ax.set_xlabel(\"pre-calibrated 10log LR\")\n ax.set_ylabel(\"post-calibrated 10log LR\")\n\n\ndef lr_histogram(lrs, y, bins=20, ax=plt):\n \"\"\"\n plots the 10log lrs\n \"\"\"\n log_lrs = np.log10(lrs)\n\n bins = np.histogram_bin_edges(log_lrs, bins=bins)\n points0, points1 = util.Xy_to_Xn(log_lrs, y)\n ax.hist(points0, bins=bins, alpha=.25, density=True)\n ax.hist(points1, bins=bins, alpha=.25, density=True)\n ax.set_xlabel('10log likelihood ratio')\n ax.set_ylabel('count')\n\n\ndef tippett(lrs, y, ax=plt):\n \"\"\"\n plots the 10log lrs\n \"\"\"\n log_lrs = np.log10(lrs)\n\n xplot = np.linspace(np.min(log_lrs), np.max(log_lrs), 100)\n lr_0, lr_1 = util.Xy_to_Xn(log_lrs, y)\n perc0 = (sum(i >= xplot for i in lr_0) / len(lr_0)) * 100\n perc1 = (sum(i >= xplot for i in lr_1) / len(lr_1)) * 100\n\n ax.plot(xplot, perc1, color='b', label='LRs given $\\mathregular{H_1}$')\n ax.plot(xplot, perc0, color='r', label='LRs given $\\mathregular{H_2}$')\n ax.axvline(x=0, color='k', linestyle='--')\n ax.set_xlabel('10log likelihood ratio')\n ax.set_ylabel('Cumulative proportion')\n ax.legend()\n\n\ndef score_distribution(scores, y, bins=20, ax=plt):\n \"\"\"\n plots the distributions of scores calculated by the (fitted) lr_system\n \"\"\"\n ax.rcParams.update({'font.size': 15})\n bins = np.histogram_bin_edges(scores[np.isfinite(scores)], bins=bins)\n\n # create weights vector so y-axis is between 0-1\n scores_by_class = [scores[y == cls] for cls in np.unique(y)]\n weights = [np.ones_like(data) / len(data) for data in scores_by_class]\n\n # adjust weights so largest value is 1\n for i, s in enumerate(scores_by_class):\n hist, _ = np.histogram(s, bins=np.r_[-np.inf, bins, np.inf], weights=weights[i])\n weights[i] = weights[i] * (1 / hist.max())\n\n # handle inf values\n if np.isinf(scores).any():\n prop_cycle = ax.rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n x_range = 
np.linspace(min(bins), max(bins), 6).tolist()\n labels = [str(round(tick, 1)) for tick in x_range]\n step_size = x_range[2] - x_range[1]\n bar_width = step_size / 4\n plot_args_inf = []\n\n if np.isneginf(scores).any():\n x_range = [x_range[0] - step_size] + x_range\n labels = ['-∞'] + labels\n for i, s in enumerate(scores_by_class):\n if np.isneginf(s).any():\n plot_args_inf.append(\n (colors[i], x_range[0] + bar_width if i else x_range[0], np.sum(weights[i][np.isneginf(s)])))\n\n if np.isposinf(scores).any():\n x_range = x_range + [x_range[-1] + step_size]\n labels.append('∞')\n for i, s in enumerate(scores_by_class):\n if np.isposinf(s).any():\n plot_args_inf.append(\n (colors[i], x_range[-1] - bar_width if i else x_range[-1], np.sum(weights[i][np.isposinf(s)])))\n\n ax.xticks(x_range, labels)\n\n for color, x_coord, y_coord in plot_args_inf:\n ax.bar(x_coord, y_coord, width=bar_width, color=color, alpha=0.25, hatch='/')\n\n for cls, weight in zip(np.unique(y), weights):\n ax.hist(scores[y == cls], bins=bins, alpha=.25,\n label=f'class {cls}', weights=weight)\n\n\ndef calibrator_fit(calibrator, score_range=(0, 1), resolution=100, ax=plt):\n \"\"\"\n plots the fitted score distributions/score-to-posterior map\n (Note - for ELUBbounder calibrator is the firststepcalibrator)\n\n TODO: plot multiple calibrators at once\n \"\"\"\n ax.rcParams.update({'font.size': 15})\n\n x = np.linspace(score_range[0], score_range[1], resolution)\n calibrator.transform(x)\n\n ax.plot(x, calibrator.p1, label='fit class 1')\n ax.plot(x, calibrator.p0, label='fit class 0')\n" ]
[ [ "numpy.linspace", "numpy.isneginf", "numpy.max", "numpy.histogram", "numpy.ones_like", "numpy.unique", "numpy.arange", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.min", "numpy.isnan", "numpy.log10", "numpy.errstate", "numpy.histogram_bin_edges", "numpy.logical_and", "matplotlib.pyplot.show", "numpy.isfinite", "numpy.isposinf", "numpy.isinf" ] ]
mgzhao/DLshoppingcart
[ "4a34beebdd5996ef39be184fbf1b9e23c9e9d436" ]
[ "run.py" ]
[ "# Copyright (c) 2017 NVIDIA Corporation\nimport torch\nimport argparse\nfrom reco_encoder.data import input_layer\nfrom reco_encoder.model import model\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport copy\nimport time\nfrom pathlib import Path\nfrom logger import Logger\nfrom math import sqrt\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='RecoEncoder')\nparser.add_argument('--lr', type=float, default=0.00001, metavar='N',\n help='learning rate')\nparser.add_argument('--weight_decay', type=float, default=0.0, metavar='N',\n help='L2 weight decay')\nparser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',\n help='dropout drop probability')\nparser.add_argument('--noise_prob', type=float, default=0.0, metavar='N',\n help='noise probability')\nparser.add_argument('--batch_size', type=int, default=64, metavar='N',\n help='global batch size')\nparser.add_argument('--summary_frequency', type=int, default=100, metavar='N',\n help='how often to save summaries')\nparser.add_argument('--aug_step', type=int, default=-1, metavar='N',\n help='do data augmentation every X step')\nparser.add_argument('--constrained', action='store_true',\n help='constrained autoencoder')\nparser.add_argument('--skip_last_layer_nl', action='store_true',\n help='if present, decoder\\'s last layer will not apply non-linearity function')\nparser.add_argument('--num_epochs', type=int, default=50, metavar='N',\n help='maximum number of epochs')\nparser.add_argument('--optimizer', type=str, default=\"adam\", metavar='N',\n help='optimizer kind: adam, momentum, adagrad or rmsprop')\nparser.add_argument('--hidden_layers', type=str, default=\"1024,512,512,128\", metavar='N',\n help='hidden layer sizes, comma-separated')\nparser.add_argument('--gpu_ids', type=str, default=\"0\", metavar='N',\n help='comma-separated gpu ids to use for data parallel training')\nparser.add_argument('--path_to_train_data', type=str, default=\"\", metavar='N',\n help='Path to training data')\nparser.add_argument('--path_to_eval_data', type=str, default=\"\", metavar='N',\n help='Path to evaluation data')\nparser.add_argument('--non_linearity_type', type=str, default=\"selu\", metavar='N',\n help='type of the non-linearity used in activations')\nparser.add_argument('--logdir', type=str, default=\"logs\", metavar='N',\n help='where to save model and write logs')\n\nargs = parser.parse_args()\nprint(args)\n\ndef do_eval(encoder, evaluation_data_layer):\n encoder.eval()\n denom = 0.0\n total_epoch_loss = 0.0\n for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()):\n inputs = Variable(src.cuda().to_dense())\n targets = Variable(eval.cuda().to_dense())\n outputs = encoder(inputs)\n loss, num_ratings = model.MSEloss(outputs, targets)\n total_epoch_loss += loss.data[0]\n denom += num_ratings.data[0]\n return sqrt(total_epoch_loss / denom)\n\ndef log_var_and_grad_summaries(logger, layers, global_step, prefix, log_histograms=False):\n \"\"\"\n Logs variable and grad stats for layer. 
Transfers data from GPU to CPU automatically\n :param logger: TB logger\n :param layers: param list\n :param global_step: global step for TB\n :param prefix: name prefix\n :param log_histograms: (default: False) whether or not log histograms\n :return:\n \"\"\"\n for ind, w in enumerate(layers):\n # Variables\n w_var = w.data.cpu().numpy()\n logger.scalar_summary(\"Variables/FrobNorm/{}_{}\".format(prefix, ind), np.linalg.norm(w_var),\n global_step)\n if log_histograms:\n logger.histo_summary(tag=\"Variables/{}_{}\".format(prefix, ind), values=w.data.cpu().numpy(),\n step=global_step)\n\n # Gradients\n w_grad = w.grad.data.cpu().numpy()\n logger.scalar_summary(\"Gradients/FrobNorm/{}_{}\".format(prefix, ind), np.linalg.norm(w_grad),\n global_step)\n if log_histograms:\n logger.histo_summary(tag=\"Gradients/{}_{}\".format(prefix, ind), values=w.grad.data.cpu().numpy(),\n step=global_step)\n\ndef main():\n logger = Logger(args.logdir)\n params = dict()\n params['batch_size'] = args.batch_size\n params['data_dir'] = args.path_to_train_data\n params['major'] = 'users'\n params['itemIdInd'] = 1\n params['userIdInd'] = 0\n print(\"Loading training data\")\n data_layer = input_layer.UserItemRecDataProvider(params=params)\n print(\"Data loaded\")\n print(\"Total items found: {}\".format(len(data_layer.data.keys())))\n print(\"Vector dim: {}\".format(data_layer.vector_dim))\n\n print(\"Loading eval data\")\n eval_params = copy.deepcopy(params)\n # must set eval batch size to 1 to make sure no examples are missed\n eval_params['data_dir'] = args.path_to_eval_data\n eval_data_layer = input_layer.UserItemRecDataProvider(params=eval_params,\n user_id_map=data_layer.userIdMap, # the mappings are provided\n item_id_map=data_layer.itemIdMap)\n \n eval_data_layer.src_data = data_layer.data\n rencoder = model.AutoEncoder(layer_sizes=[data_layer.vector_dim] + [int(l) for l in args.hidden_layers.split(',')],\n nl_type=args.non_linearity_type,\n is_constrained=args.constrained,\n dp_drop_prob=args.drop_prob,\n last_layer_activations=not args.skip_last_layer_nl)\n\n model_checkpoint = args.logdir + \"/model\"\n path_to_model = Path(model_checkpoint)\n if path_to_model.is_file():\n print(\"Loading model from: {}\".format(model_checkpoint))\n rencoder.load_state_dict(torch.load(model_checkpoint))\n\n print('######################################################')\n print('######################################################')\n print('############# AutoEncoder Model: #####################')\n print(rencoder)\n print('######################################################')\n print('######################################################')\n\n gpu_ids = [int(g) for g in args.gpu_ids.split(',')]\n print('Using GPUs: {}'.format(gpu_ids))\n if len(gpu_ids)>1:\n rencoder = nn.DataParallel(rencoder,\n device_ids=gpu_ids)\n rencoder = rencoder.cuda()\n\n if args.optimizer == \"adam\":\n optimizer = optim.Adam(rencoder.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay)\n elif args.optimizer == \"adagrad\":\n optimizer = optim.Adagrad(rencoder.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay)\n elif args.optimizer == \"momentum\":\n optimizer = optim.SGD(rencoder.parameters(),\n lr=args.lr, momentum=0.9,\n weight_decay=args.weight_decay)\n scheduler = MultiStepLR(optimizer, milestones=[24, 36, 48, 66, 72], gamma=0.5)\n elif args.optimizer == \"rmsprop\":\n optimizer = optim.RMSprop(rencoder.parameters(),\n lr=args.lr, momentum=0.9,\n weight_decay=args.weight_decay)\n else:\n raise 
ValueError('Unknown optimizer kind')\n\n t_loss = 0.0\n t_loss_denom = 0.0\n global_step = 0\n\n if args.noise_prob > 0.0:\n dp = nn.Dropout(p=args.noise_prob)\n\n for epoch in range(args.num_epochs):\n print('Doing epoch {} of {}'.format(epoch, args.num_epochs))\n e_start_time = time.time()\n rencoder.train()\n total_epoch_loss = 0.0\n denom = 0.0\n if args.optimizer == \"momentum\":\n scheduler.step()\n for i, mb in enumerate(data_layer.iterate_one_epoch()):\n inputs = Variable(mb.cuda().to_dense())\n optimizer.zero_grad()\n outputs = rencoder(inputs)\n loss, num_ratings = model.MSEloss(outputs, inputs)\n loss = loss / num_ratings\n loss.backward()\n optimizer.step()\n global_step += 1\n t_loss += loss.data[0]\n t_loss_denom += 1\n\n if i % args.summary_frequency == 0:\n print('[%d, %5d] RMSE: %.7f' % (epoch, i, sqrt(t_loss / t_loss_denom)))\n logger.scalar_summary(\"Training_RMSE\", sqrt(t_loss/t_loss_denom), global_step)\n t_loss = 0\n t_loss_denom = 0.0\n log_var_and_grad_summaries(logger, rencoder.encode_w, global_step, \"Encode_W\")\n log_var_and_grad_summaries(logger, rencoder.encode_b, global_step, \"Encode_b\")\n if not rencoder.is_constrained:\n log_var_and_grad_summaries(logger, rencoder.decode_w, global_step, \"Decode_W\")\n log_var_and_grad_summaries(logger, rencoder.decode_b, global_step, \"Decode_b\")\n\n total_epoch_loss += loss.data[0]\n denom += 1\n\n #if args.aug_step > 0 and i % args.aug_step == 0 and i > 0:\n if args.aug_step > 0:\n # Magic data augmentation trick happen here\n for t in range(args.aug_step):\n inputs = Variable(outputs.data)\n if args.noise_prob > 0.0:\n inputs = dp(inputs)\n optimizer.zero_grad()\n outputs = rencoder(inputs)\n loss, num_ratings = model.MSEloss(outputs, inputs)\n loss = loss / num_ratings\n loss.backward()\n optimizer.step()\n\n e_end_time = time.time()\n print('Total epoch {} finished in {} seconds with TRAINING RMSE loss: {}'\n .format(epoch, e_end_time - e_start_time, sqrt(total_epoch_loss/denom)))\n logger.scalar_summary(\"Training_RMSE_per_epoch\", sqrt(total_epoch_loss/denom), epoch)\n logger.scalar_summary(\"Epoch_time\", e_end_time - e_start_time, epoch)\n if epoch % 3 == 0 or epoch == args.num_epochs - 1:\n eval_loss = do_eval(rencoder, eval_data_layer)\n print('Epoch {} EVALUATION LOSS: {}'.format(epoch, eval_loss))\n logger.scalar_summary(\"EVALUATION_RMSE\", eval_loss, epoch)\n print(\"Saving model to {}\".format(model_checkpoint + \".epoch_\"+str(epoch)))\n torch.save(rencoder.state_dict(), model_checkpoint + \".epoch_\"+str(epoch))\n\n print(\"Saving model to {}\".format(model_checkpoint + \".last\"))\n torch.save(rencoder.state_dict(), model_checkpoint + \".last\")\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.nn.Dropout", "torch.load", "numpy.linalg.norm", "torch.nn.DataParallel", "torch.autograd.Variable" ] ]
vipulraheja/transformers
[ "864c1dfe34e43038fcd2289505f5cc7acd65ad2e" ]
[ "src/transformers/models/albert/modeling_albert.py" ]
[ "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ALBERT model. \"\"\"\n\nimport math\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_albert import AlbertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"albert-base-v2\"\n_CONFIG_FOR_DOC = \"AlbertConfig\"\n_TOKENIZER_FOR_DOC = \"AlbertTokenizer\"\n\n\nALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"albert-base-v1\",\n \"albert-large-v1\",\n \"albert-xlarge-v1\",\n \"albert-xxlarge-v1\",\n \"albert-base-v2\",\n \"albert-large-v2\",\n \"albert-xlarge-v2\",\n \"albert-xxlarge-v2\",\n # See all ALBERT models at https://huggingface.co/models?filter=albert\n]\n\n\ndef load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n print(name)\n\n for name, array in zip(names, arrays):\n original_name = name\n\n # If saved from the TF HUB module\n name = name.replace(\"module/\", \"\")\n\n # Renaming and simplifying\n name = name.replace(\"ffn_1\", \"ffn\")\n name = name.replace(\"bert/\", \"albert/\")\n name = name.replace(\"attention_1\", \"attention\")\n name = name.replace(\"transform/\", \"\")\n name = name.replace(\"LayerNorm_1\", \"full_layer_layer_norm\")\n name = name.replace(\"LayerNorm\", \"attention/LayerNorm\")\n name = name.replace(\"transformer/\", \"\")\n\n # The feed forward layer had an 'intermediate' step which has been abstracted away\n name = name.replace(\"intermediate/dense/\", \"\")\n name = name.replace(\"ffn/intermediate/output/dense/\", \"ffn_output/\")\n\n # ALBERT attention was split between self and output which have been abstracted away\n name = name.replace(\"/output/\", \"/\")\n name = name.replace(\"/self/\", \"/\")\n\n # The pooler is a linear layer\n name = name.replace(\"pooler/dense\", \"pooler\")\n\n # The classifier was simplified to predictions from cls/predictions\n name = name.replace(\"cls/predictions\", \"predictions\")\n name = name.replace(\"predictions/attention\", \"predictions\")\n\n # Naming was changed to be more explicit\n name = name.replace(\"embeddings/attention\", \"embeddings\")\n name = name.replace(\"inner_group_\", \"albert_layers/\")\n name = name.replace(\"group_\", \"albert_layer_groups/\")\n\n # Classifier\n if len(name.split(\"/\")) == 1 and (\"output_bias\" in name or \"output_weights\" in name):\n name = \"classifier/\" + name\n\n # No ALBERT model currently handles the next sentence prediction task\n if \"seq_relationship\" in name:\n name = name.replace(\"seq_relationship/output_\", \"sop_classifier/classifier/\")\n name = name.replace(\"weights\", \"weight\")\n\n name = name.split(\"/\")\n\n # Ignore the gradients applied by the LAMB/ADAM optimizers.\n if (\n \"adam_m\" in name\n or \"adam_v\" in name\n or \"AdamWeightDecayOptimizer\" in name\n or \"AdamWeightDecayOptimizer_1\" in name\n or \"global_step\" in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if m_name[-11:] == \"_embeddings\":\n pointer = 
getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(f\"Initialize PyTorch weight {name} from {original_name}\")\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass AlbertEmbeddings(nn.Module):\n \"\"\"\n Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass AlbertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads}\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.attention_head_size = config.hidden_size // config.num_attention_heads\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.output_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n 
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pruned_heads = set()\n\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.query = prune_linear_layer(self.query, index)\n self.key = prune_linear_layer(self.key, index)\n self.value = prune_linear_layer(self.value, index)\n self.dense = prune_linear_layer(self.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.num_attention_heads = self.num_attention_heads - len(heads)\n self.all_head_size = self.attention_head_size * self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n # Normalize the attention scores to probabilities.\n attention_probs = 
nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # Should find a better way to do this\n w = (\n self.dense.weight.t()\n .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)\n .to(context_layer.dtype)\n )\n b = self.dense.bias.to(context_layer.dtype)\n\n projected_context_layer = torch.einsum(\"bfnd,ndh->bfh\", context_layer, w) + b\n projected_context_layer_dropout = self.output_dropout(projected_context_layer)\n layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)\n return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)\n\n\nclass AlbertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = AlbertAttention(config)\n self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)\n self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)\n self.activation = ACT2FN[config.hidden_act]\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)\n\n ffn_output = apply_chunking_to_forward(\n self.ff_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output[0],\n )\n hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])\n\n return (hidden_states,) + attention_output[1:] # add attentions if we output them\n\n def ff_chunk(self, attention_output):\n ffn_output = self.ffn(attention_output)\n ffn_output = self.activation(ffn_output)\n ffn_output = self.ffn_output(ffn_output)\n return ffn_output\n\n\nclass AlbertLayerGroup(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n layer_hidden_states = ()\n layer_attentions = ()\n\n for layer_index, albert_layer in enumerate(self.albert_layers):\n layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)\n hidden_states = layer_output[0]\n\n if output_attentions:\n layer_attentions = layer_attentions + (layer_output[1],)\n\n if output_hidden_states:\n layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (layer_hidden_states,)\n if output_attentions:\n outputs = outputs + (layer_attentions,)\n return outputs # last-layer hidden state, (layer hidden states), (layer attentions)\n\n\nclass AlbertTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.embedding_hidden_mapping_in = 
nn.Linear(config.embedding_size, config.hidden_size)\n self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n\n all_hidden_states = (hidden_states,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i in range(self.config.num_hidden_layers):\n # Number of layers in a hidden group\n layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n # Index of the hidden group\n group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n layer_group_output = self.albert_layer_groups[group_idx](\n hidden_states,\n attention_mask,\n head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n output_attentions,\n output_hidden_states,\n )\n hidden_states = layer_group_output[0]\n\n if output_attentions:\n all_attentions = all_attentions + layer_group_output[-1]\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass AlbertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = AlbertConfig\n base_model_prefix = \"albert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass AlbertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.AlbertForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction\n (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each 
layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n sop_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Args:\n config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertModel(AlbertPreTrainedModel):\n\n config_class = AlbertConfig\n load_tf_weights = load_tf_weights_in_albert\n base_model_prefix = \"albert\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n\n self.config = config\n self.embeddings = AlbertEmbeddings(config)\n self.encoder = AlbertTransformer(config)\n if add_pooling_layer:\n self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n self.pooler_activation = nn.Tanh()\n else:\n self.pooler = None\n self.pooler_activation = None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has\n a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT\n model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.\n\n These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,\n while [2,3] correspond to the two inner groups of the second hidden layer.\n\n Any layer with in index other than [0,1,2,3] will result in an error. 
See base class PreTrainedModel for more\n information about head pruning\n \"\"\"\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForPreTraining(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n self.sop_classifier = AlbertSOPHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n def 
set_output_embeddings(self, new_embeddings):\n self.predictions.decoder = new_embeddings\n\n def get_input_embeddings(self):\n return self.albert.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n sentence_order_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair\n (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence\n A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A).\n\n Returns:\n\n Example::\n\n >>> from transformers import AlbertTokenizer, AlbertForPreTraining\n >>> import torch\n\n >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2')\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> outputs = model(input_ids)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> sop_logits = outputs.sop_logits\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n\n prediction_scores = self.predictions(sequence_output)\n sop_scores = self.sop_classifier(pooled_output)\n\n total_loss = None\n if labels is not None and sentence_order_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))\n total_loss = masked_lm_loss + sentence_order_loss\n\n if not return_dict:\n output = (prediction_scores, sop_scores) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return AlbertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n sop_logits=sop_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass AlbertMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size)\n 
self.activation = ACT2FN[config.hidden_act]\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n\n prediction_scores = hidden_states\n\n return prediction_scores\n\n\nclass AlbertSOPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, pooled_output):\n dropout_pooled_output = self.dropout(pooled_output)\n logits = self.classifier(dropout_pooled_output)\n return logits\n\n\n@add_start_docstrings(\n \"Albert Model with a `language modeling` head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMaskedLM(AlbertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.predictions = AlbertMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.predictions.decoder = new_embeddings\n\n def get_input_embeddings(self):\n return self.albert.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_outputs = outputs[0]\n\n prediction_scores = self.predictions(sequence_outputs)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForSequenceClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,\n config.num_labels - 1]``. 
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForTokenClassification(AlbertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForQuestionAnswering(AlbertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMultipleChoice(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see\n `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.einsum", "torch.from_numpy", "torch.nn.Embedding", "torch.nn.LayerNorm", "tensorflow.train.load_variable", "torch.nn.Linear", "torch.matmul", "torch.nn.Tanh", "numpy.transpose", "torch.arange", "torch.nn.BCEWithLogitsLoss", "tensorflow.train.list_variables", "torch.nn.MSELoss" ] ]
masukai/AutoColorIntensity
[ "7cba8105eed5bbc708c0fbb1c4841325aae33ed5" ]
[ "AutoColorIntensity.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf8 -*-\nimport os\nimport glob\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport time\n\n# _listはリスト\n# np_はnp.arrayに格納されている\n\n\ndef main(scale, ext, name, HSV_u, HSV_l, cl, gauk, gaun, closing_on): # メイン関数\n start_time = time.time()\n\n # 変数調整はここで行う\n PtoC = 1.0 / scale # pixel to cm ImageJ等で前もって計測\n # 校正の必要あり。複数枚で確認が要必要。\n extension = ext # 拡張子は調節して使う\n size_ex = int(len(extension)) * -1\n # binaryとclosingの調節はMainPGArea内で直接行うこと\n\n # 以下メインの流れ\n folder_name = name\n path = \"./\" + folder_name\n os.chdir(path)\n name_list, area_list = procedure(\n PtoC, extension, size_ex, folder_name, HSV_u, HSV_l, cl, gauk, gaun, closing_on)\n os.chdir(\"../\")\n savefile(folder_name, name_list, area_list)\n\n # 計測(実測)値と計算値の比較用 基本的にコメントアウト\n # verification(folder_name)\n\n if __name__ != 'AutoColorIntensity':\n print(\">>> complete {0:.2f} sec <<<\".format(time.time() - start_time))\n return\n else:\n return time.time() - start_time\n\n\ndef procedure(PtoC, extension, size_ex, folder_name, HSV_u, HSV_l, cl, gauk, gaun, closing_on):\n jpg_list = glob.glob(\"*{0}\".format(extension)) # JPGの探索とループ\n name_list = []\n area_list = []\n for i in range(len(jpg_list)):\n my_file = jpg_list[i]\n name = my_file[:size_ex]\n name_list.append(name)\n if __name__ != 'AutoArea':\n print(\"{0}/{1}: {2}\".format(i + 1, len(jpg_list), name))\n img = cv2.imread(my_file)\n obj = MainPGArea(name, img, folder_name, HSV_u,\n HSV_l, cl, gauk, gaun, closing_on)\n area_list.append(round(obj.pixels * (PtoC ** 2), 2)) # 小数点以下2桁\n return name_list, area_list\n\n\ndef savefile(folder_name, name_list, area_list):\n savecsv_buffer = np.array([name_list, area_list])\n savecsv = savecsv_buffer[:, np.argsort(savecsv_buffer[0])].T\n with open(\"{0}_calculated_area.csv\".format(folder_name), \"w\") as f:\n writer = csv.writer(f, lineterminator=\"\\n\")\n for i in range(len(name_list)):\n writer.writerow(savecsv[i])\n return\n\n\ndef verification(folder_name):\n print(\"Start Verification\")\n np_mea = np.loadtxt('measured_area.csv', delimiter=',', usecols=(1))\n np_cal = np.loadtxt('{0}_calculated_area.csv'.format(\n folder_name), delimiter=',', usecols=(1))\n print(\"Measured: {0}\".format(np_mea))\n print(\"Calculated: {0}\".format(np_cal))\n\n # 可視化\n np_check = np.array([-10000, 10000])\n\n coef_1 = np.polyfit(np_mea, np_cal, 1)\n print(\"y = ax + b\")\n print(\"a: {0}\".format(coef_1[0]))\n print(\"b: {0}\".format(coef_1[1]))\n y_pred_1 = coef_1[0] * np_check + coef_1[1]\n\n ax = plt.figure(num=0, dpi=360).gca()\n ax.set_title(\"Verification\", fontsize=14)\n ax.scatter(np_mea, np_cal, s=2, color=\"red\", label=\"Verification\")\n ax.scatter(np.mean(np_mea), np.mean(np_cal), s=40,\n marker=\"*\", color=\"purple\", label=\"Mean Value\")\n ax.plot(np_check, y_pred_1, linewidth=1, color=\"red\",\n label=\"fitting: y={0:.2f}x+{1:.2f}\".format(coef_1[0], coef_1[1])) # 最小2乗法 1次式\n ax.plot(np_check, np_check, linewidth=1, color=\"black\", label=\"y=x\")\n plt.grid(which='major')\n plt.legend()\n ax.set_xlim([0, 3000])\n ax.set_ylim([0, 3000])\n ax.set_xlabel('Measured', fontsize=14)\n ax.set_ylabel('Calculated', fontsize=14)\n ax.set_aspect('equal', adjustable='box')\n plt.savefig(\"Verification.png\", bbox_inches='tight', pad_inches=0.1)\n plt.pause(0.3) # 計算速度を上げる場合はコメントアウト\n plt.clf()\n return\n\n\nclass MainPGArea: # 色調に差があり、輪郭になる場合HSVに変換>>>2値化して判別\n def __init__(self, file_name, img, folder_name, HSV_u, HSV_l, cl, gauk, gaun, closing_on):\n self.file_name = 
file_name\n self.img = img\n self.folder_name = folder_name\n self.HSV_u = HSV_u\n self.HSV_l = HSV_l\n self.cl = cl\n self.gauk = gauk\n self.gaun = gaun\n self.closing_on = closing_on\n self.pixels = 0\n self.hsv_transration()\n self.gauss_transration()\n self.hsv_binary()\n if self.closing_on:\n self.closing()\n self.intensity()\n self.BGR()\n self.GRAY()\n self.save_image()\n self.calculation_area()\n\n def hsv_transration(self): # 色調変換\n self.hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)\n return\n\n def gauss_transration(self): # ガウス変換\n self.gauss = cv2.GaussianBlur(\n self.hsv, (self.gauk, self.gauk), self.gaun) # フィルタの大きさ\n return\n\n def hsv_binary(self): # HSV制限2値化\n lower = np.array(self.HSV_l) # 下限 0 0 0\n upper = np.array(self.HSV_u) # 上限 180 255 255\n self.bin = cv2.inRange(self.gauss, lower, upper)\n return\n\n def closing(self): # 膨張収縮処理により穴埋め\n kernel = np.ones((self.cl, self.cl), np.uint8)\n self.cl = cv2.morphologyEx(self.bin, cv2.MORPH_CLOSE, kernel)\n return\n\n def intensity(self):\n if self.closing_on:\n filter = np.array([self.cl] * 3)\n else:\n filter = np.array([self.bin] * 3)\n filter = np.transpose(filter, (1, 2, 0))\n self.check = self.hsv * filter\n return\n\n def BGR(self):\n self.bgr = cv2.cvtColor(self.check, cv2.COLOR_HSV2RGB_FULL)\n # self.bgr = cv2.cvtColor(self.check, cv2.COLOR_HSV2BGR)\n return\n\n def GRAY(self):\n gamma22LUT = np.array([pow(x / 255.0, 2.2) for x in range(256)],\n dtype='float32')\n img_bgrL = cv2.LUT(self.bgr, gamma22LUT)\n img_grayL = cv2.cvtColor(img_bgrL, cv2.COLOR_BGR2GRAY)\n self.gray = pow(img_grayL, 1.0 / 2.2) * 255\n data = np.ravel(self.gray)\n data_0 = data[data > 0]\n path = \"../{0}_save_values\".format(self.folder_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n with open(\"{0}_pixels_value.csv\".format(self.file_name), \"w\") as f:\n writer = csv.writer(f, lineterminator=\"\\n\")\n writer.writerow([\"mean\", np.mean(data_0)])\n writer.writerow([\"values\"])\n for i in range(len(data_0)):\n writer.writerow([data_0[i]])\n os.chdir(\"../{0}\".format(self.folder_name))\n # print(data_0)\n\n hist, edges = np.histogram(data_0, bins=16, density=True)\n w = edges[1] - edges[0]\n hist = hist * w\n path = \"../{0}_save_hist\".format(self.folder_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n plt.bar(edges[:-1], hist, w, color=\"gray\", edgecolor=\"black\")\n plt.plot([np.mean(data_0), np.mean(data_0)], [0, 1], color=\"red\",\n label=\"mean: {0:.2f}\".format(np.mean(data_0)))\n plt.xlabel(\"Grayscale values\")\n plt.ylabel(\"Probability of occurrence, bins=16\")\n plt.ylim(0, 0.5)\n plt.legend()\n plt.savefig(\"hist_{0}.png\".format(self.file_name), dpi=360, bbox_inches='tight', pad_inches=0.1)\n # plt.pause(0.3) # 計算速度を上げる場合はコメントアウト\n plt.clf()\n os.chdir(\"../{0}\".format(self.folder_name))\n return\n\n def save_image(self): # 画像の保存\n path = \"../{0}_save_image\".format(self.folder_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n # cv2.imwrite(\"{0}_hsv.jpg\".format(self.file_name), self.hsv)\n # cv2.imwrite(\"{0}_gauss.jpg\".format(self.file_name), self.gauss)\n # cv2.imwrite(\"{0}_bin.jpg\".format(self.file_name), self.bin)\n if self.closing_on:\n cv2.imwrite(\"{0}_cl.jpg\".format(self.file_name), self.cl)\n else:\n cv2.imwrite(\"{0}_bin.jpg\".format(self.file_name), self.bin)\n # cv2.imwrite(\"{0}_check.jpg\".format(self.file_name), self.check)\n cv2.imwrite(\"{0}_bgr.jpg\".format(self.file_name), self.bgr)\n cv2.imwrite(\"{0}_gray.jpg\".format(self.file_name), self.gray)\n 
os.chdir(\"../{0}\".format(self.folder_name))\n return\n\n def calculation_area(self): # 面積pixel分の計算\n if self.closing_on:\n self.pixels = cv2.countNonZero(self.cl) # 計算する画像の名前に変更\n else:\n self.pixels = cv2.countNonZero(self.bin)\n return\n\n\nif __name__ == '__main__':\n scale = 28.3889\n ext = \".jpg\"\n name = \"photo\"\n HSV_u = [36, 255, 95]\n HSV_l = [3, 48, 0]\n cl = 19\n gauk = 15\n gaun = 3\n closing_on = True\n main(scale, ext, name, HSV_u, HSV_l, cl, gauk, gaun, closing_on)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.polyfit", "numpy.mean", "numpy.histogram", "numpy.ravel", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.transpose", "numpy.argsort", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.ones", "matplotlib.pyplot.clf", "matplotlib.pyplot.grid", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.pause", "numpy.loadtxt" ] ]
mandarvast19/ga-learner-dsmp-repo
[ "d944ce257ae54c51f4489dc5182ef84819dcb263" ]
[ "Visualization-for-Company-Stakeholders/code.py" ]
[ "# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata=pd.read_csv(path)\r\n#Code starts here\r\nloan_status = data['Loan_Status'].value_counts()\r\nloan_status.plot(kind='bar')\n\n\n# --------------\n#Code starts here\r\nproperty_and_loan = data.groupby(['Property_Area','Loan_Status']).size().unstack()\r\nproperty_and_loan.plot(kind='bar',stacked=False)\r\nplt.xlabel('Property Area')\r\nplt.ylabel('Loan Status')\r\nplt.xticks(rotation=45)\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\neducation_and_loan = data.groupby(['Education','Loan_Status']).size().unstack()\r\neducation_and_loan.plot(kind='bar')\r\nplt.xlabel('Education Status')\r\nplt.ylabel('Loan Status')\r\nplt.xticks(rotation=45)\r\nplt.show()\n\n\n# --------------\n#Code starts here\r\ngraduate = data[data['Education']=='Graduate']\r\nnot_graduate = data[data['Education']=='Not Graduate']\r\ndata.plot(kind='density',label='Graduate')\r\ndata.plot(kind='density',label='Not Graduate')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Code ends here\r\n\r\n#For automatic legend display\r\nplt.legend()\n\n\n# --------------\n#Code starts here\r\nfig,(ax_1,ax_2,ax_3) = plt.subplots(1,3)\r\nal = data.groupby(['ApplicantIncome','LoanAmount']).size().unstack()\r\ndata.plot.scatter(x='ApplicantIncome',y='LoanAmount',ax=ax_1)\r\nplt.title('Applicant Income')\r\ndata.plot.scatter(x='CoapplicantIncome',y='LoanAmount',ax=ax_2)\r\nplt.title('Coapplicant Income')\r\ndata['TotalIncome']=data['ApplicantIncome']+data['CoapplicantIncome']\r\ndata.plot.scatter(x='TotalIncome',y='LoanAmount',ax=ax_3)\r\nplt.title('Total Income')\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Ollehto/ox_lib
[ "2326aad94be4635b43d70fbedc63e669b2643019" ]
[ "lib/functions.py" ]
[ "import numpy as np\n\ndef get_board(state, b0=None):\n\tif b0 is None:\n\t\trepr_array = np.empty(9, dtype=np.int8)\n\telse:\n\t\trepr_array = b0.ravel()\n\tfor n in range(0, 8):\n\t\tnew_state = state // (3**(8-n))\n\t\trepr_array[8-n] = new_state\n\t\tstate -= new_state * (3**(8-n))\n\trepr_array[0] = state\n\tif b0 is None:\n\t\treturn repr_array.reshape((3, 3))\n\ndef get_state(board):\n\tstate = board.ravel().dot(np.power(3, np.arange(9)))\n\treturn state\n\ndef decide_win(board):\n\tfor p in (1, 2):\n\t\tboard_bool = board == p\n\t\tif np.any(np.all(board_bool, axis=1)) or np.any(np.all(board_bool, axis=0)) or np.all(board_bool[(0, 1, 2), (0, 1, 2)]) or np.all(board_bool[(2, 1, 0), (0, 1, 2)]):\n\t\t\treturn p\n\tif np.sum(board == 0) == 0:\n\t\treturn -1\n\telse:\n\t\treturn 0\n\ndef list_empty_indices(board):\n\treturn np.where(board.ravel() == 0)[0]" ]
[ [ "numpy.all", "numpy.arange", "numpy.sum", "numpy.empty" ] ]
animeshramesh/super-resolution
[ "59d005d23387cb6f382c8eeff3421ca3bae0f200" ]
[ "generate_training_data.py" ]
[ "'''\nDump all your images in dataset/output.\nThis script will generate the corresponding low-res images in dataset/input.\n1. Apply Gaussian blur\n2. Downsample images\n'''\n\nimport os\nimport numpy as np\nimport scipy\nimport scipy.misc, scipy.ndimage\nfrom scipy.misc import imsave, imread, imresize\nfrom scipy.ndimage.filters import gaussian_filter\nimport tqdm\nimport time\n\nimport img_utils\n\n# Paths\nDATASET_DIR = '/Users/admin/Downloads/images_all/'\nTARGET_DIR = '/Users/admin/Dev/super-resolution/dataset/'\n\n# Constants\nSCALE_FACTOR = 2\nIMG_SHAPE = 256 # must be divisible by stride\nSTRIDE = 16\nLR_PATCH_SIZE = 32\nHR_PATCH_SIZE = LR_PATCH_SIZE * SCALE_FACTOR\n\n# Create target directory if not present\nif not os.path.exists(TARGET_DIR):\n os.makedirs(TARGET_DIR)\nif not os.path.exists(os.path.join(TARGET_DIR, 'Y')):\n os.makedirs(os.path.join(TARGET_DIR, 'Y'))\nif not os.path.exists(os.path.join(TARGET_DIR, 'X')):\n os.makedirs(os.path.join(TARGET_DIR, 'X'))\n\nfor index, img_file in enumerate(os.listdir(DATASET_DIR)):\n img = imread(os.path.join(DATASET_DIR, img_file), mode='RGB')\n\n # Resize to 256 x 256\n img = imresize(img, (IMG_SHAPE, IMG_SHAPE))\n\n # Create patches\n nb_hr_images = (IMG_SHAPE ** 2) // (STRIDE ** 2) # Flooring division\n hr_samples = np.empty((nb_hr_images, HR_PATCH_SIZE, HR_PATCH_SIZE, 3))\n image_subsample_iterator = img_utils.subimage_generator(img, STRIDE, HR_PATCH_SIZE, nb_hr_images)\n\n stride_range = np.sqrt(nb_hr_images).astype(int)\n\n i = 0\n for j in range(stride_range):\n for k in range(stride_range):\n hr_samples[i, :, :, :] = next(image_subsample_iterator)\n i += 1\n\n t1 = time.time()\n # Create nb_hr_images 'X' and 'Y' sub-images of size hr_patch_size for each patch\n for i in range(nb_hr_images):\n ip = hr_samples[i]\n\n # Save ground truth image Y\n imsave(TARGET_DIR + \"/Y/\" + \"%d_%d.png\" % (index + 1, i + 1), ip)\n\n # Apply Gaussian Blur to Y\n op = gaussian_filter(ip, sigma=0.5)\n\n # Subsample by scaling factor to create X\n op = imresize(op, (LR_PATCH_SIZE, LR_PATCH_SIZE), interp='bicubic')\n\n # Ensure that size of X = size of Y\n op = imresize(op, (HR_PATCH_SIZE, HR_PATCH_SIZE), interp='bicubic')\n\n # Save X\n imsave(TARGET_DIR + \"/X/\" + \"%d_%d.png\" % (index + 1, i+1), op)\n\n print(\"Finished image %d in time %0.2f seconds.. \" % (index + 1, time.time() - t1))\n if index + 1 >= 50:\n break\n" ]
[ [ "scipy.misc.imresize", "numpy.sqrt", "scipy.misc.imsave", "scipy.ndimage.filters.gaussian_filter", "numpy.empty" ] ]
nvaytet/osyris
[ "7deb57d2e6e6635fd4f065a196466d1db02644fc" ]
[ "src/osyris/io/amr.py" ]
[ "# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Osyris contributors (https://github.com/nvaytet/osyris)\nimport numpy as np\nfrom .hilbert import hilbert_cpu_list\nfrom .reader import Reader, ReaderKind\nfrom .. import config\nfrom .. import units\nfrom . import utils\n\n\nclass AmrReader(Reader):\n def __init__(self):\n super().__init__(kind=ReaderKind.AMR)\n\n def initialize(self, meta, select):\n length_unit = config.get_unit(\"x\", meta[\"unit_d\"], meta[\"unit_l\"],\n meta[\"unit_t\"])\n if meta[\"scale\"] is not None:\n scale = units(meta[\"scale\"])\n scaling = (length_unit.to(scale) / scale).magnitude * scale\n else:\n scaling = length_unit\n\n # AMR grid variables\n self.variables.update({\n \"level\": {\n \"read\": True,\n \"type\": \"i\",\n \"buffer\": None,\n \"pieces\": {},\n \"unit\": 1.0 * units.dimensionless\n },\n \"cpu\": {\n \"read\": True,\n \"type\": \"i\",\n \"buffer\": None,\n \"pieces\": {},\n \"unit\": 1.0 * units.dimensionless\n },\n \"dx\": {\n \"read\": True,\n \"type\": \"d\",\n \"buffer\": None,\n \"pieces\": {},\n \"unit\": scaling\n }\n })\n self.variables.update({\n \"xyz_{}\".format(c): {\n \"read\": True,\n \"type\": \"d\",\n \"buffer\": None,\n \"pieces\": {},\n \"unit\": scaling\n }\n for c in \"xyz\"[:meta[\"ndim\"]]\n })\n\n self.cpu_list = hilbert_cpu_list(meta=meta,\n scaling=scaling,\n select=select,\n infofile=meta[\"infofile\"])\n self.initialized = True\n\n def allocate_buffers(self, ngridmax, twotondim):\n super().allocate_buffers(ngridmax, twotondim)\n self.xcent = np.zeros([8, 3], dtype=np.float64)\n self.xg = np.zeros([ngridmax, 3], dtype=np.float64)\n self.son = np.zeros([ngridmax, twotondim], dtype=np.int32)\n self.ref = np.zeros([ngridmax, twotondim], dtype=np.bool)\n\n def read_header(self, info):\n # nx,ny,nz\n self.offsets[\"i\"] += 2\n self.offsets[\"n\"] += 2\n [nx, ny, nz] = utils.read_binary_data(fmt=\"3i\",\n content=self.bytes,\n offsets=self.offsets)\n ncoarse = nx * ny * nz\n self.meta[\"xbound\"] = [\n float(int(nx / 2)),\n float(int(ny / 2)),\n float(int(nz / 2))\n ]\n\n # nboundary\n self.offsets[\"i\"] += 2\n self.offsets[\"n\"] += 2\n [self.meta[\"nboundary\"]] = utils.read_binary_data(fmt=\"i\",\n content=self.bytes,\n offsets=self.offsets)\n self.meta[\"ngridlevel\"] = np.zeros(\n [info[\"ncpu\"] + self.meta[\"nboundary\"], info[\"levelmax\"]], dtype=np.int32)\n\n # noutput\n self.offsets[\"i\"] += 1\n self.offsets[\"n\"] += 2\n self.offsets[\"d\"] += 1\n [noutput] = utils.read_binary_data(fmt=\"i\",\n content=self.bytes,\n offsets=self.offsets)\n # dtold, dtnew\n self.offsets[\"i\"] += 2\n self.offsets[\"n\"] += 3\n self.offsets[\"d\"] += 1 + 2 * noutput\n info[\"dtold\"] = np.array(\n utils.read_binary_data(fmt=\"{}d\".format(info[\"levelmax\"]),\n content=self.bytes,\n offsets=self.offsets))\n info[\"dtnew\"] = np.array(\n utils.read_binary_data(fmt=\"{}d\".format(info[\"levelmax\"]),\n content=self.bytes,\n offsets=self.offsets))\n\n # Read the number of grids\n self.offsets[\"i\"] += 2 + (2 * info[\"ncpu\"] * info[\"levelmax\"])\n self.offsets[\"n\"] += 7\n self.offsets[\"d\"] += 16\n self.meta[\"ngridlevel\"][:info[\"ncpu\"], :] = np.array(\n utils.read_binary_data(fmt=\"{}i\".format(info[\"ncpu\"] * info[\"levelmax\"]),\n content=self.bytes,\n offsets=self.offsets)).reshape(\n info[\"levelmax\"], info[\"ncpu\"]).T\n\n # Read boundary grids if any\n self.offsets[\"i\"] += 10 * info[\"levelmax\"]\n self.offsets[\"n\"] += 3\n if self.meta[\"nboundary\"] > 0:\n self.offsets[\"i\"] += (2 * 
self.meta[\"nboundary\"] * info[\"levelmax\"])\n # self.offsets[\"n\"] += 4\n self.meta[\"ngridlevel\"][info[\"ncpu\"]:info[\"ncpu\"] +\n self.meta[\"nboundary\"], :] = np.array(\n utils.read_binary_data(\n fmt=\"{}i\".format(self.meta[\"nboundary\"] *\n info[\"levelmax\"]),\n content=self.bytes,\n offsets=self.offsets)).reshape(\n info[\"levelmax\"],\n self.meta[\"nboundary\"]).T\n self.offsets[\"n\"] += 2\n\n # Determine bound key precision\n self.offsets[\"i\"] += 5\n self.offsets[\"s\"] += 128\n [key_size] = utils.read_binary_data(fmt=\"i\",\n content=self.bytes,\n offsets=self.offsets,\n skip_head=False,\n increment=False)\n\n # Offset for AMR\n self.offsets[\"i\"] += 3 * ncoarse\n self.offsets[\"n\"] += 3\n self.offsets[\"s\"] += key_size\n\n def read_level_header(self, ilevel, twotondim):\n # Geometry\n self.dxcell = 0.5**(ilevel + 1)\n for ind in range(twotondim):\n iz = int((ind) / 4)\n iy = int((ind - 4 * iz) / 2)\n ix = int((ind - 2 * iy - 4 * iz))\n self.xcent[ind, 0] = (float(ix) - 0.5) * self.dxcell\n self.xcent[ind, 1] = (float(iy) - 0.5) * self.dxcell\n self.xcent[ind, 2] = (float(iz) - 0.5) * self.dxcell\n\n def read_cacheline_header(self, ncache, ndim):\n # xg: grid coordinates\n self.offsets['i'] += ncache * 3\n self.offsets['n'] += 3\n for n in range(ndim):\n self.xg[:ncache, n] = utils.read_binary_data(fmt=\"{}d\".format(ncache),\n content=self.bytes,\n offsets=self.offsets)\n\n # son indices\n self.offsets['i'] += ncache * (1 + 2 * ndim)\n self.offsets['n'] += 1 + 2 * ndim\n\n def read_variables(self, ncache, ind, ilevel, cpuid, info):\n\n self.son[:ncache, ind] = utils.read_binary_data(fmt=\"{}i\".format(ncache),\n content=self.bytes,\n offsets=self.offsets)\n\n self.variables[\"level\"][\"buffer\"]._array[:ncache, ind] = ilevel + 1\n for n in range(info[\"ndim\"]):\n key = \"xyz_\" + \"xyz\"[n]\n self.variables[key][\"buffer\"]._array[:ncache, ind] = (\n self.xg[:ncache, n] + self.xcent[ind, n] - self.meta[\"xbound\"][n]\n ) * info[\"boxlen\"] * self.variables[key][\"unit\"].magnitude\n self.variables[\"dx\"][\"buffer\"]._array[:ncache, ind] = self.dxcell * info[\n \"boxlen\"] * self.variables[\"dx\"][\"unit\"].magnitude\n self.variables[\"cpu\"][\"buffer\"]._array[:ncache, ind] = cpuid + 1\n\n # Note: use lmax here instead of levelmax because the user might not\n # want to load all levels. levelmax is always the max level in the\n # entire simulation.\n self.ref[:ncache, ind] = np.logical_not(\n np.logical_and(self.son[:ncache, ind] > 0, ilevel < info[\"lmax\"] - 1))\n\n def make_conditions(self, select, ncache):\n conditions = super().make_conditions(select, ncache)\n conditions.update({\"leaf\": self.ref[:ncache, :]})\n return conditions\n\n def read_footer(self, ncache, twotondim):\n # Increment offsets with remainder of the file\n self.offsets['i'] += ncache * 2 * twotondim\n self.offsets['n'] += 2 * twotondim\n\n def step_over(self, ncache, twotondim, ndim):\n self.offsets['i'] += ncache * (4 + 3 * twotondim + 2 * ndim)\n self.offsets['d'] += ncache * ndim\n self.offsets['n'] += 4 + 3 * twotondim + 3 * ndim\n" ]
[ [ "numpy.logical_and", "numpy.zeros" ] ]
stoneyang159/fer_pytorch
[ "32e0748a084a366c70da1d88608050544e56c4bf" ]
[ "fer_pytorch/models/backbone/cafferesnet.py" ]
[ "from __future__ import print_function, division, absolute_import\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\n\npretrained_settings = {\n 'cafferesnet101': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/cafferesnet101-9d633cc0.pth',\n 'input_space': 'BGR',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 255],\n 'mean': [102.9801, 115.9465, 122.7717],\n 'std': [1, 1, 1],\n 'num_classes': 1000\n }\n }\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n # it is slightly better whereas slower to set stride = 1\n # self.layer4 = self._make_layer(block, 512, layers[3], stride=1)\n self.avgpool = nn.AvgPool2d(7)\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def features(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n def logits(self, x):\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n def forward(self, x):\n x = self.features(x)\n x = self.logits(x)\n return x\n\n\ndef cafferesnet101(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-101 models.\n Args:\n pretrained (bool): If True, returns a models pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['cafferesnet101'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url" ] ]
amaiya/stellargraph
[ "512e60a8f572a4bb432b0397a2b452251e167d8f", "ef5588038896c1a65db467a768f2e023c6562611" ]
[ "tests/layer/test_cluster_gcn.py", "stellargraph/data/converter.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2019 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nCluster-GCN tests\n\n\"\"\"\nfrom tensorflow.keras import backend as K\nfrom stellargraph.layer.cluster_gcn import *\nfrom stellargraph.mapper import ClusterNodeGenerator\nfrom stellargraph.core.graph import StellarGraph\n\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nfrom tensorflow import keras\nimport pytest\n\n\ndef create_graph_features():\n G = nx.Graph()\n G.add_nodes_from([\"a\", \"b\", \"c\"])\n G.add_edges_from([(\"a\", \"b\"), (\"b\", \"c\"), (\"a\", \"c\")])\n G = G.to_undirected()\n return G, np.array([[1, 1], [1, 0], [0, 1]])\n\n\ndef create_stellargraph():\n Gnx, features = create_graph_features()\n nodes = Gnx.nodes()\n node_features = pd.DataFrame.from_dict(\n {n: f for n, f in zip(nodes, features)}, orient=\"index\"\n )\n G = StellarGraph(Gnx, node_features=node_features)\n\n return G\n\n\ndef test_ClusterGraphConvolution_config():\n cluster_gcn_layer = ClusterGraphConvolution(units=16)\n conf = cluster_gcn_layer.get_config()\n\n assert conf[\"units\"] == 16\n assert conf[\"activation\"] == \"linear\"\n assert conf[\"use_bias\"] == True\n assert conf[\"kernel_initializer\"][\"class_name\"] == \"GlorotUniform\"\n assert conf[\"bias_initializer\"][\"class_name\"] == \"Zeros\"\n assert conf[\"kernel_regularizer\"] == None\n assert conf[\"bias_regularizer\"] == None\n assert conf[\"kernel_constraint\"] == None\n assert conf[\"bias_constraint\"] == None\n\n\ndef test_ClusterGraphConvolution_init():\n cluster_gcn_layer = ClusterGraphConvolution(units=16, activation=\"relu\")\n\n assert cluster_gcn_layer.units == 16\n assert cluster_gcn_layer.use_bias == True\n assert cluster_gcn_layer.get_config()[\"activation\"] == \"relu\"\n\n\ndef test_GraphConvolution():\n G, features = create_graph_features()\n\n # We need to specify the batch shape as one for the ClusterGraphConvolutional logic to work\n x_t = Input(batch_shape=(1,) + features.shape, name=\"X\")\n A_t = Input(batch_shape=(1, 3, 3), name=\"A\")\n output_indices_t = Input(batch_shape=(1, None), dtype=\"int32\", name=\"outind\")\n\n # Note we add a batch dimension of 1 to model inputs\n adj = nx.to_numpy_array(G)[None, :, :]\n out_indices = np.array([[0, 1]], dtype=\"int32\")\n x = features[None, :, :]\n\n # Remove batch dimension\n A_mat = Lambda(lambda A: K.squeeze(A, 0))(A_t)\n\n # Test with final_layer=False\n out = ClusterGraphConvolution(2, final_layer=False)([x_t, output_indices_t, A_mat])\n model = keras.Model(inputs=[x_t, A_t, output_indices_t], outputs=out)\n preds = model.predict([x, adj, out_indices], batch_size=1)\n assert preds.shape == (1, 3, 2)\n\n # Now try with final_layer=True\n out = ClusterGraphConvolution(2, final_layer=True)([x_t, output_indices_t, A_mat])\n # The final layer removes the batch dimension and causes the call to predict to fail.\n # We are going to manually add the batch dimension before calling predict.\n out = K.expand_dims(out, 0)\n model = 
keras.Model(inputs=[x_t, A_t, output_indices_t], outputs=out)\n print(\n f\"x_t: {x_t.shape} A_t: {A_t.shape} output_indices_t: {output_indices_t.shape}\"\n )\n preds = model.predict([x, adj, out_indices], batch_size=1)\n assert preds.shape == (1, 2, 2)\n\n # Check for errors with batch size != 1\n # We need to specify the batch shape as one for the ClusterGraphConvolutional logic to work\n x_t = Input(batch_shape=(2,) + features.shape)\n output_indices_t = Input(batch_shape=(2, None), dtype=\"int32\")\n with pytest.raises(ValueError):\n out = ClusterGraphConvolution(2)([x_t, A_t, output_indices_t])\n\n\ndef test_ClusterGCN_init():\n G, features = create_graph_features()\n nodes = G.nodes()\n node_features = pd.DataFrame.from_dict(\n {n: f for n, f in zip(nodes, features)}, orient=\"index\"\n )\n G = StellarGraph(G, node_type_name=\"node\", node_features=node_features)\n\n generator = ClusterNodeGenerator(G)\n cluster_gcn_model = ClusterGCN(\n layer_sizes=[2], generator=generator, activations=[\"relu\"], dropout=0.5\n )\n\n assert cluster_gcn_model.layer_sizes == [2]\n assert cluster_gcn_model.activations == [\"relu\"]\n assert cluster_gcn_model.dropout == 0.5\n\n\ndef test_ClusterGCN_apply():\n\n G = create_stellargraph()\n\n generator = ClusterNodeGenerator(G)\n\n cluster_gcn_model = ClusterGCN(\n layer_sizes=[2], generator=generator, activations=[\"relu\"], dropout=0.0\n )\n\n x_in, x_out = cluster_gcn_model.build()\n model = keras.Model(inputs=x_in, outputs=x_out)\n\n # Check fit_generator method\n preds_2 = model.predict_generator(generator.flow([\"a\", \"b\", \"c\"]))\n assert preds_2.shape == (1, 3, 2)\n\n\ndef test_ClusterGCN_activations():\n\n G = create_stellargraph()\n generator = ClusterNodeGenerator(G)\n\n # Test activations are set correctly\n cluster_gcn = ClusterGCN(layer_sizes=[2], generator=generator, activations=[\"relu\"])\n assert cluster_gcn.activations == [\"relu\"]\n\n cluster_gcn = ClusterGCN(\n layer_sizes=[2, 2], generator=generator, activations=[\"relu\", \"relu\"]\n )\n assert cluster_gcn.activations == [\"relu\", \"relu\"]\n\n cluster_gcn = ClusterGCN(\n layer_sizes=[2], generator=generator, activations=[\"linear\"]\n )\n assert cluster_gcn.activations == [\"linear\"]\n\n with pytest.raises(TypeError):\n # activations for layers must be specified\n ClusterGCN(layer_sizes=[2], generator=generator)\n\n with pytest.raises(AssertionError):\n # More activations than layers\n ClusterGCN(layer_sizes=[2], generator=generator, activations=[\"relu\", \"linear\"])\n\n with pytest.raises(AssertionError):\n # Fewer activations than layers\n ClusterGCN(layer_sizes=[2, 2], generator=generator, activations=[\"relu\"])\n\n with pytest.raises(ValueError):\n # Unknown activation\n ClusterGCN(layer_sizes=[2], generator=generator, activations=[\"bleach\"])\n\n\ndef test_ClusterGCN_regularisers():\n G = create_stellargraph()\n\n generator = ClusterNodeGenerator(G)\n\n cluster_gcn = ClusterGCN(\n layer_sizes=[2],\n activations=[\"relu\"],\n generator=generator,\n kernel_regularizer=keras.regularizers.l2(),\n )\n\n with pytest.raises(ValueError):\n ClusterGCN(\n layer_sizes=[2],\n activations=[\"relu\"],\n generator=generator,\n kernel_regularizer=\"fred\",\n )\n\n cluster_gcn = ClusterGCN(\n layer_sizes=[2],\n activations=[\"relu\"],\n generator=generator,\n bias_initializer=\"zeros\",\n )\n\n cluster_gcn = ClusterGCN(\n layer_sizes=[2],\n activations=[\"relu\"],\n generator=generator,\n bias_initializer=initializers.zeros(),\n )\n\n with pytest.raises(ValueError):\n ClusterGCN(\n 
layer_sizes=[2],\n activations=[\"relu\"],\n generator=generator,\n bias_initializer=\"barney\",\n )\n", "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2019 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\nfrom tensorflow.keras.utils import to_categorical\nfrom stellargraph.core.graph import StellarGraphBase\n\n\nclass NodeAttributeSpecification:\n \"\"\"\n This class converts numeric and non-numeric node attributes to the appropriate\n numeric vectors for machine learning.\n\n In the StellarML library, all machine learning tasks that require feature and target\n attribute specifications should be passed an object of this class.\n\n # Usage\n\n Instantiation::\n\n nfs = NodeAttributeSpecification()\n\n To add an attribute for a node type, choose an appropriate Converter class\n and use the following methods:\n\n For a single attribute of node type node_type use `add_attribute`::\n\n nfs.add_attribute(node_type, attribute_name, Converter, <converter parameters>)\n\n For multiple attributes of a single node type, using a single converter class,\n use `add_attribute_list`::\n\n nfs.add_attribute_list(node_type, attribute_name, Converter, <converter paramters>)\n\n To add all attributes using the same converter class, use `add_all_attributes`,\n you will need to provide a StellarGraph object so that the node attributes can\n be extracted::\n\n nfs.add_all_attributes(graph, node_type, Converter, <converter paramters>)\n\n\n # Converter classes:\n\n There are multiple converter classes that can be used depending upon the\n attribute values and whether the attribute specification required is for features\n or targets.\n\n * BinaryConverter:\n This converter will create a value with a one if the attribute exists in the node\n attributes and zero if it does not.\n\n * CategorigalConverter:\n This converter takes an attribute that has multiple values (categories) and converts\n the categories to integers.\n\n * OneHotCategorigalConverter:\n This converter takes an attribute that has multiple values (categories) and converts\n the categories to one-hot vectors of length equal to the number of categories.\n\n * NumericConverter:\n This converter takes an attribute that has integer or floating point values and\n optionally normalizes them by mean and standard deviation.\n\n More inforamtion on these converters and their parameters can be found in their individual\n documentation. 
Also note that the converter parameters should be passed to the\n attribute specification methods, not directly to the converter.\n\n \"\"\"\n\n def __init__(self):\n self._node_specs = {}\n self._node_feature_specs = {}\n\n def add_attribute(self, node_type, attr, converter, **conv_args):\n \"\"\"\n Add a named attribute with specified converter for a node type\n\n Args:\n node_type: Node type that contains the attribute (must be specified, even\n if there is only a single node type)\n attr: Attribute name\n converter: Converter class (this should be the class, not an object)\n **conv_args: Optional arguemnts to the converter, specific to converter\n \"\"\"\n\n if not issubclass(converter, StellarAttributeConverter):\n raise TypeError(\n \"Converter should be a subclass of StellarAttributeConverter\"\n )\n\n node_type_spec = self._node_specs.get(node_type, {})\n node_type_spec[attr] = converter(**conv_args)\n self._node_specs[node_type] = node_type_spec\n\n def add_attribute_list(self, node_type, attrs, converter, **conv_args):\n \"\"\"\n Add multiple named attributes with the specified converter, note that\n an individual converter object will be created for each attribute.\n\n Args:\n node_type: Node type that contains the attribute names (must be specified,\n even if there is only a single node type)\n attrs: List of attribute names to use)\n converter: Converter class (this should be the class, not an object)\n **conv_args: Optional arguments to the converter, specific to converter\n \"\"\"\n if not issubclass(converter, StellarAttributeConverter):\n raise TypeError(\n \"Converter should be a subclass of StellarAttributeConverter\"\n )\n\n node_type_spec = self._node_specs.get(node_type, {})\n for attr in attrs:\n node_type_spec[attr] = converter(**conv_args)\n self._node_specs[node_type] = node_type_spec\n\n def add_all_attributes(\n self, graph, node_type, converter, ignored_attributes=[], **conv_args\n ):\n \"\"\"\n Add multiple named attributes with the specified converter to all\n attributes of the given node type found in the graph.\n\n Args:\n graph: A StellarGraph object containing nodes of the specified type.\n node_type: Node type that contains the attribute names (must be specified,\n even if there is only a single node type)\n converter: Converter class (this should be the class, not an object)\n ignored_attributes: (Optional) a list of attribute names to not include.\n **conv_args: Optional arguments to the converter, specific to converter\n \"\"\"\n if not issubclass(converter, StellarAttributeConverter):\n raise TypeError(\n \"Converter should be a subclass of StellarAttributeConverter\"\n )\n if not isinstance(graph, StellarGraphBase):\n raise TypeError(\"Graph should be a StellarGraph\")\n\n # Go through graph to find node attributes\n all_attrs = set(\n k for v in graph.nodes_of_type(node_type) for k in graph.nodes[v].keys()\n )\n\n # Remove any ignored attributes\n attrs = all_attrs.difference(set(ignored_attributes))\n\n # Don't use node type as attribute:\n attrs.discard(graph._node_type_attr)\n\n # Set found attributes with converter\n self.add_attribute_list(node_type, attrs, converter, **conv_args)\n\n def has_type(self, node_type):\n \"\"\"\n Returns True if the specified type exists in the attribute specification\n\n Args:\n node_type: String specifying the node type\n\n Returns:\n A bool specifying if the node type exists.\n \"\"\"\n return node_type in self._node_specs\n\n def get_types(self):\n \"\"\"\n Returns a list of the node types in this attribute 
specification\n \"\"\"\n return list(self._node_specs.keys())\n\n def get_attributes(self, node_type=None):\n \"\"\"\n Get the list of attributes in a defined order for the given node type.\n\n Args:\n node_type: Node type key, if None and there is a single node type\n the attributes of that type are returned.\n\n Returns:\n List of attribute IDs\n \"\"\"\n if node_type is None:\n if len(self._node_specs) == 1:\n node_attrs = next(iter(self._node_specs.values())).keys()\n else:\n raise RuntimeError(\n \"Please specify the node type when there are multiple node types\"\n )\n\n elif node_type in self._node_specs:\n node_attrs = self._node_specs[node_type].keys()\n\n else:\n raise ValueError(\n \"There are no nodes of type '{}' set as targets\".format(node_type)\n )\n return sorted(node_attrs, key=str)\n\n def get_feature_indices(self, node_type):\n \"\"\"\n Gives the ranges of the indices in the numeric vector\n corresponding to each attribute the specification.\n\n Args:\n node_type: The node type\n\n Returns:\n A dictionary of attribute index ranges in the form:\n ```\n { attribute_jj : (start_index, end_index) ... }\n ```\n \"\"\"\n if node_type not in self._node_specs:\n return {}\n\n node_type_spec = self._node_specs[node_type]\n feature_list = sorted(node_type_spec.keys(), key=str)\n\n # Run over sorted array and map attribute to\n # range of values in the feature\n start_ind = 0\n feature_id_to_range = {}\n for attr in feature_list:\n conv = node_type_spec[attr]\n end_ind = start_ind + len(conv)\n feature_id_to_range[attr] = (start_ind, end_ind)\n start_ind = end_ind\n\n return feature_id_to_range\n\n def get_converter(self, node_type, attr):\n \"\"\"\n Get the converter object for the specified node type and attribute name\n Args:\n node_type: Node type\n attr: Attribute name\n\n Returns:\n The converter object\n \"\"\"\n\n if node_type not in self._node_specs:\n raise KeyError(\"Node type '{}' not in known node types.\".format(node_type))\n if attr not in self._node_specs[node_type]:\n raise KeyError(\n \"Attribute '{}' not known for node type {}.\".format(attr, node_type)\n )\n return self._node_specs[node_type][attr]\n\n def get_output_size(self, node_type=None):\n \"\"\"\n Get the size of the output vector for the node_type\n\n Args:\n node_type: The node type\n\n Returns:\n An integer specifying the vector length for this node type\n \"\"\"\n if node_type is None:\n if len(self._node_specs) == 1:\n node_type = next(iter(self._node_specs.keys()))\n else:\n raise ValueError(\n \"Node type must be specified if there are multiple node types\"\n )\n elif node_type not in self._node_specs:\n raise ValueError(\n \"Node type '{}' not found in attribute specification.\".format(node_type)\n )\n\n return np.sum([len(conv) for conv in self._node_specs[node_type].values()])\n\n def fit_transform(self, node_type, data):\n \"\"\"\n Fit the converters for the given node type to the data and convert the\n data to output vectors.\n\n Args:\n node_type: The node type\n data: A list of dictionaries containing attribute names and values\n\n Returns:\n A numpy array containing the values of the converted attributes, of\n shape (length of data, output size)\n\n \"\"\"\n n_data = len(data)\n\n # Convert attribute data to numeric values for each attribute\n converted_features = {}\n attr_list = self.get_attributes(node_type)\n for attr_name in attr_list:\n attr_data = [d.get(attr_name) for d in data]\n conv = self.get_converter(node_type, attr_name)\n converted_features[attr_name] = 
conv.fit_transform(attr_data)\n\n # Store features in array\n feature_array = np.concatenate(\n [\n np.reshape(converted_features[attr_name], (n_data, -1))\n for attr_name in attr_list\n ],\n axis=1,\n )\n return feature_array\n\n def transform(self, node_type, data):\n \"\"\"\n Convert the supplied data to numeric vectors, this assumes that the converters\n have previously been trained.\n\n Args:\n node_type: The node type\n data: A list of dictionaries containing attribute names and values\n\n Returns:\n A numpy array containing the values of the converted attributes, of\n shape (length of data, output size)\n\n \"\"\"\n n_data = len(data)\n\n # Convert attribute data to numeric values for each attribute\n converted_features = {}\n attr_list = self.get_attributes(node_type)\n for attr_name in attr_list:\n attr_data = [d.get(attr_name) for d in data]\n conv = self.get_converter(node_type, attr_name)\n converted_features[attr_name] = conv.transform(attr_data)\n\n # Store features in array\n feature_array = np.concatenate(\n [\n np.reshape(converted_features[attr_name], (n_data, -1))\n for attr_name in attr_list\n ],\n axis=1,\n )\n return feature_array\n\n def inverse_transform(self, node_type, data):\n \"\"\"\n Convert the supplied numeric vectors back to the form of the original data.\n\n Args:\n node_type: The node type\n data: A numpy array of numeric data.\n\n Returns:\n A list containing the input attributes.\n\n \"\"\"\n n_data = len(data)\n\n # The indices in the transformed vector for each attribute\n indices_for_attr = self.get_feature_indices(node_type)\n\n # Convert numeric values to the original domain for each attribute\n converted_features = {}\n attr_list = self.get_attributes(node_type)\n for attr_name in attr_list:\n conv = self.get_converter(node_type, attr_name)\n\n assert conv is not None\n assert attr_name in indices_for_attr\n\n # Extract data for this attribute\n index_range = indices_for_attr[attr_name]\n attr_data = data[:, index_range[0] : index_range[1]]\n\n converted_features[attr_name] = conv.inverse_transform(attr_data)\n\n # Convert to a list\n attr_out = [\n {attr_name: converted_features[attr_name][ii] for attr_name in attr_list}\n for ii in range(n_data)\n ]\n return attr_out\n\n\nclass StellarAttributeConverter(ABC):\n \"\"\"\n Abstract class for attribute converters.\n \"\"\"\n\n @abstractmethod\n def __len__(self):\n pass\n\n @abstractmethod\n def fit_transform(self):\n pass\n\n @abstractmethod\n def transform(self):\n pass\n\n @abstractmethod\n def inverse_transform(self):\n pass\n\n\nclass NumericConverter(StellarAttributeConverter):\n \"\"\"\n This converter takes an attribute that has integer or floating point values and\n optionally normalizes them by mean and standard deviation.\n\n Args:\n dtype: (Optional) convert to a vector of this numpy data type\n default_value: (Optional) if the attribute is missing, if this is \"mean\" (default)\n assign the mean value calculated over the valid data, if this is a\n float or int, assign that value directly.\n normalize: (Optional) if this is \"standard\" normalize the values by shifting and\n scaling the values so that the mean is zero and the standard deviation is one.\n \"\"\"\n\n def __init__(self, dtype=\"float32\", default_value=\"mean\", normalize=\"standard\"):\n self.dtype = dtype\n self.normalize = normalize\n self.default_value = default_value\n\n def __len__(self):\n # TODO: extend this to multiple values\n return 1\n\n def fit_transform(self, data):\n data = np.asarray(data, 
dtype=self.dtype)\n\n # Calculate normalization parameters\n if self.normalize == \"standard\":\n self.scale = np.nanstd(data, axis=0)\n self.offset = np.nanmean(data, axis=0)\n else:\n self.scale = 1\n self.offset = 0\n\n if self.scale < 1e-6:\n raise ValueError(\n \"When trying to normalize the data, the standard deviation close to zero.\"\n )\n\n return self.transform(data)\n\n def transform(self, data):\n data = np.asarray(data, dtype=self.dtype)\n\n # Normalization\n if self.normalize == \"standard\":\n data = (data - self.offset) / self.scale\n\n # Fill missing values\n if self.default_value == \"mean\":\n fill_value = np.nanmean(data)\n elif self.default_value == \"median\":\n fill_value = np.nanmedian(data)\n elif np.isscalar(self.default_value):\n fill_value = self.default_value\n\n data = np.where(np.isfinite(data), data, fill_value)\n return data\n\n def inverse_transform(self, data):\n data = np.asanyarray(data)\n\n # De-normalization\n if self.normalize == \"standard\":\n data = data * self.scale + self.offset\n\n # We can't un-fill missing values!\n return np.squeeze(data)\n\n\nclass CategoricalConverter(StellarAttributeConverter):\n \"\"\"\n This converter takes an attribute that has multiple values (categories) and converts\n the categories to integers.\n\n Args:\n default_value: Value to assign to the vector output when the attribute is missing.\n dtype: (Optional) convert to a vector of this numpy data type\n\n \"\"\"\n\n def __init__(self, default_value=0, dtype=\"float32\"):\n self.default_value = default_value\n self.dtype = dtype\n self.categories = []\n\n def __len__(self):\n return 1\n\n def fit_transform(self, data):\n self.categories = sorted(set(data), key=str)\n return self.transform(data)\n\n def transform(self, data):\n # TODO: Checks for data input\n return np.array(\n [\n self.categories.index(d) if d is not None else self.default_value\n for d in data\n ],\n dtype=self.dtype,\n )\n\n def inverse_transform(self, data):\n # TODO: Checks for data input\n return [self.categories[int(ii)] for ii in data]\n\n\nclass OneHotCategoricalConverter(StellarAttributeConverter):\n \"\"\"\n This converter takes an attribute that has multiple values (categories) and converts\n the categories to one-hot vectors of length equal to the number of categories.\n\n Args:\n default_value: (Optional) value to assign to the vector output when the attribute is missing.\n without_first: (Optional) Return a vector that omits the first value, so is zero when\n the first category is supplied. 
This can be useful for inputs to DL systems.\n dtype: (Optional) convert to a vector of this numpy data type\n \"\"\"\n\n def __init__(self, default_value=0, without_first=False, dtype=\"float32\"):\n self.default_value = default_value\n self.without_first = without_first\n self.dtype = dtype\n self.categories = []\n\n def fit_transform(self, data):\n self.categories = sorted(set(data), key=str)\n if len(self.categories) == 1:\n print(\"Warning: Only one category for attribute\")\n\n return self.transform(data)\n\n def __len__(self):\n if self.without_first:\n size = len(self.categories) - 1\n else:\n size = len(self.categories)\n return size\n\n def transform(self, data):\n data_cats = [\n self.categories.index(d) if d is not None else self.default_value\n for d in data\n ]\n\n # Otherwise use the Keras to_categorical function\n data_trans = to_categorical(data_cats, len(self.categories)).astype(self.dtype)\n\n # If the without_first is set, remove the first value\n if self.without_first:\n data_trans = data_trans[:, 1:]\n\n return data_trans\n\n def inverse_transform(self, data):\n data = np.asanyarray(data)\n assert np.ndim(data) == 2\n\n # Get an integer category, adding one if we have without_first=True\n category_id = np.argmax(data, axis=1)\n if self.without_first:\n category_id = (category_id + 1) * np.any(data, axis=1).astype(int)\n\n return [self.categories[ii] for ii in category_id]\n\n\nclass BinaryConverter(StellarAttributeConverter):\n \"\"\"\n This converter will create a value with a one if the attribute exists in the node\n attributes and zero if it does not.\n\n Args:\n default_value: Value to assign to the vector output when the attribute is missing.\n dtype: (Optional) convert to a vector of this numpy data type\n\n \"\"\"\n\n def __init__(self, dtype=\"float32\", default_value=0):\n self.dtype = dtype\n self.default_value = default_value\n\n def __len__(self):\n return 1\n\n def fit_transform(self, data):\n return self.transform(data)\n\n def transform(self, data):\n data_bool = [\n bool(d) if d is not None else bool(self.default_value) for d in data\n ]\n return np.asarray(data_bool, dtype=self.dtype)\n\n def inverse_transform(self, data):\n return [None if d == 0 else 1 for d in data]\n" ]
[ [ "tensorflow.keras.regularizers.l2", "tensorflow.keras.backend.squeeze", "tensorflow.keras.Model", "tensorflow.keras.backend.expand_dims", "numpy.array" ], [ "numpy.nanmedian", "numpy.isfinite", "numpy.asarray", "numpy.reshape", "numpy.squeeze", "numpy.ndim", "numpy.asanyarray", "numpy.argmax", "numpy.nanmean", "numpy.isscalar", "numpy.any", "numpy.nanstd" ] ]
kwan3217/kwanspice
[ "38303ff516dabf965cdc754c48290187cc237da3" ]
[ "voyager/supertraj_camera.py" ]
[ "import spiceypy as cspice\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Solar system positions\ncspice.furnsh(\"../../Data/spice/generic/spk/planets/de430.bsp\")\n#Satellite positions\ncspice.furnsh(\"../../Data/spice/generic/spk/satellites/jup230l.bsp\")\n#Planet constants\ncspice.furnsh(\"../../Data/spice/generic/pck/pck00010.tpc\")\ncspice.furnsh(\"../../Data/spice/generic/pck/gm_de431.tpc\")\ncspice.furnsh(\"../../Data/spice/generic/pck/juno.tpc\") #Jupiter gravity field\n#Leap seconds\ncspice.furnsh(\"../../Data/spice/generic/lsk/naif0012.tls\")\n#Spacecraft kernel\ncspice.furnsh(\"../../Data/spice/Voyager/spk/voyager_2.ST+1992_m05208u.merged.bsp\") #Supertrajectory for Voyager 1\n\n#From original movie:\n#Frame 0-93 is title sequence\n#Frame 94 is high above Solar system (about 115AU), immediately begins move down\n#Frame 504, latitude stops changing\n#Frame 508, distance stops changing\n#Frame 708, Voyager 2 Launch 1977-08-20T14:29:45\n#Frame 712, Voyager 1 Launch 1977-09-05T12:56\n#Frame 862, Voyager 1 at Jupiter, 1979-03-05\n#Frame 896, Voyager 2 at Jupiter, 1979-07-09T22:29:00\n#Frame 1032, Voyager 1 at Saturn, 1980-11-12\n#Frame 1111, Voyager 2 at Saturn, 1981-08-25T03:24:05\n#Frame 1553, Voyager 2 at Uranus, 1986-01-24T17:59:47\n#Frame 1910, Voyager 2 at Neptune, 1989-08-25T03:56:36\n#Frame 1983, start moving back up\n#Frame 2171, end of movie\n\ndef linterp(x0,y0,x1,y1,x):\n t=(x-x0)/(x1-x0)\n return y0*(1-t)+y1*t\n\nframe0=94\nframe1=708\nframe2=1910\nframe3=2171\net1=cspice.str2et(\"1977-08-20 14:29:45 UTC\")\net_v2=cspice.str2et(\"1977-08-21 00:00:00 TDB\")\net2=cspice.str2et(\"1989-08-25 03:56:36 TDB\")\net0=linterp(frame1,et1,frame2,et2,frame0)\net3=linterp(frame1,et1,frame2,et2,frame3)\nframes=np.arange(0,frame3)\nets=linterp(frame1,et1,frame2,et2,frames)\nv2_headings=ets*float('nan')\ncam_headings=ets*0-35.56\nfor i,et in enumerate(ets):\n if et>et_v2:\n sc_state=cspice.spkezr(\"-32\",et,\"ECLIPB1950\",\"NONE\",\"0\")[0]\n 
v2_headings[i]=np.degrees(np.arctan2(-sc_state[4],-sc_state[3]))\n\npiecewise_x=[708]\npiecewise_y=[-35.56]\n\ncam_headings[708:866]=linterp(708,-35.56,866,v2_headings[866],frames[708:866])\nf0=866\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=937\ncam_headings[f0:f1]=linterp(f0,v2_headings[f0],f1,v2_headings[f1],frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=1040\ncam_headings[f0:f1]=v2_headings[f0:f1]\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=1137\ncam_headings[f0:f1]=linterp(f0,v2_headings[f0],f1,v2_headings[f1],frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\npiecewise_x.append(1200)\npiecewise_y.append(v2_headings[1200])\npiecewise_x.append(1300)\npiecewise_y.append(v2_headings[1300])\npiecewise_x.append(1400)\npiecewise_y.append(v2_headings[1400])\nf1=1525\ncam_headings[f0:f1]=v2_headings[f0:f1]\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=1595\ncam_headings[f0:f1]=linterp(f0,v2_headings[f0],f1,v2_headings[f1],frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=1627\ncam_headings[f0:f1]=v2_headings[f0:f1]\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0])\nf1=1755\ncam_headings[f0:f1]=v2_headings[f0:f1]+linterp(f0,0,f1,-15,frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0]-15)\nf1=1823\ncam_headings[f0:f1]=v2_headings[f0:f1]+linterp(f0,-15,f1,-5,frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0]-5)\nf1=1900\ncam_headings[f0:f1]=v2_headings[f0:f1]+linterp(f0,-5,f1,90,frames[f0:f1])\nf0=f1\npiecewise_x.append(f0)\npiecewise_y.append(v2_headings[f0]+90)\nf1=frame3\ncam_headings[f0:f1]=v2_headings[f0]+90\nimport scipy.interpolate\ninterpol=scipy.interpolate.CubicSpline(piecewise_x,piecewise_y,bc_type='clamped')\nxs=np.arange(piecewise_x[0],piecewise_x[-1])\nplt.plot(frames,v2_headings,frames,cam_headings,piecewise_x,piecewise_y,'*',xs,interpol(xs))\nplt.show()\ncam_headings[piecewise_x[0]:piecewise_x[-1]]=interpol(xs)\nwith open(\"cam_headings.inc\",\"w\") as ouf:\n print(\"#declare cam_headings=array[%d] {\"%len(cam_headings),file=ouf)\n for i,cam_heading in enumerate(cam_headings):\n #print(\"/*%4d*/ %f,\"%(i,cam_heading),file=ouf)\n print(\"/*%4d*/ %f,\"%(i,cam_heading),file=ouf)\n print(\"}\",file=ouf)\n" ]
[ [ "numpy.arctan2", "numpy.arange", "matplotlib.pyplot.show" ] ]
taejoo/MLOps_Recipes032820
[ "0f135ae32034b0a08871a2189375c8a3f6222f9b" ]
[ "models/risk-model/train/train.py" ]
[ "import os\nimport sys\nimport argparse\n\nimport dotenv\nimport joblib\nimport pandas as pd\nfrom azureml.core import Run\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\nfrom azureml.core import Dataset\n\ndef main():\n\n model_name, dataset_name = getRuntimeArgs()\n dotenv.load_dotenv()\n\n run = Run.get_context()\n\n if run._run_id.startswith(\"_OfflineRun\"):\n run = None\n\n credit_data_df = None\n\n #Load data from Dataset or from local file(for offline runs)\n if run is None:\n dataset_filename = os.environ.get(\"DATASET_FILE_NAME\", )\n credit_data_df = pd.read_csv(\"dataset/\" +dataset_filename)\n else:\n dataset = Dataset.get_by_name(workspace=run.experiment.workspace, name=dataset_name)\n #dataset = run.input_datasets[dataset_name]\n credit_data_df = dataset.to_pandas_dataframe()\n\n clf = model_train(credit_data_df, run)\n\n #copying to \"outputs\" directory, automatically uploads it to azure ml\n output_dir = './outputs/'\n os.makedirs(output_dir, exist_ok=True)\n joblib.dump(value=clf, filename=output_dir+model_name)\n\n #run.upload_file(name=\"./outputs/\" + model_file_name, path_or_stream=model_file_name)\n\ndef model_train(credit_data_df, run):\n #credit_data_df = pd.read_csv(\"dataset/german_credit_data.csv\") # , nrows=200000, parse_dates=[\"LEG1_DEP_DATE_GMT\", \"LEG1_ARR_DATE_GMT\",\"LEG2_DEP_DATE_GMT\", \"LEG2_ARR_DATE_GMT\"])\n credit_data_df.drop(['Sno'], axis=1, inplace=True)\n\n y_raw = credit_data_df['Risk']\n X_raw = credit_data_df.drop('Risk', axis=1)\n #del credit_data_df\n\n categorical_features = X_raw.select_dtypes(include=['object']).columns\n numeric_features = X_raw.select_dtypes(include=['int64', 'float']).columns\n\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='constant', fill_value=\"missing\")),\n ('onehotencoder', OneHotEncoder(categories='auto', sparse=False))])\n\n numeric_transformer = Pipeline(steps=[\n # ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\n feature_engineering_pipeline = ColumnTransformer(\n transformers=[\n ('numeric', numeric_transformer, numeric_features),\n ('categorical', categorical_transformer, categorical_features)\n ], remainder=\"drop\")\n\n #Encode Labels\n le = LabelEncoder()\n encoded_y = le.fit_transform(y_raw)\n\n #Train test split\n X_train, X_test, y_train, y_test = train_test_split(X_raw, encoded_y, test_size=0.20, stratify=encoded_y, random_state=42)\n\n #Create sklearn pipeline\n lr_clf = Pipeline(steps=[('preprocessor', feature_engineering_pipeline),\n ('classifier', LogisticRegression(solver=\"lbfgs\"))])\n #Train the model\n lr_clf.fit(X_train, y_train)\n\n #Capture metrics\n train_acc = lr_clf.score(X_train, y_train)\n test_acc = lr_clf.score(X_test, y_test)\n print(\"training accuracy: %.3f\" % train_acc)\n print(\"test data accuracy: %.3f\" % test_acc)\n\n #Log to Azure ML (if not running in local test mode)\n if run is not None:\n run.log('Train accuracy', train_acc)\n run.log('Test accuracy', test_acc)\n\n return lr_clf\n\ndef getRuntimeArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--MODEL_NAME', type=str)\n parser.add_argument('--DATASET_NAME', type=str)\n args = 
parser.parse_args()\n return args.MODEL_NAME, args.DATASET_NAME\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.preprocessing.OneHotEncoder", "sklearn.impute.SimpleImputer", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder", "sklearn.compose.ColumnTransformer" ] ]
michaelaye/pandas
[ "c6110e25b3eceb2f25022c2aa9ccea03c0b8b359" ]
[ "pandas/io/json.py" ]
[ "# pylint: disable-msg=E1101,W0613,W0603\n\nimport os\nimport copy\nfrom collections import defaultdict\nimport numpy as np\n\nimport pandas.json as _json\nfrom pandas.tslib import iNaT\nfrom pandas.compat import long, u\nfrom pandas import compat, isnull\nfrom pandas import Series, DataFrame, to_datetime\nfrom pandas.io.common import get_filepath_or_buffer\nfrom pandas.core.common import AbstractMethodError\nfrom pandas.formats.printing import pprint_thing\n\nloads = _json.loads\ndumps = _json.dumps\n\n# interface to/from\n\n\ndef to_json(path_or_buf, obj, orient=None, date_format='epoch',\n double_precision=10, force_ascii=True, date_unit='ms',\n default_handler=None):\n\n if isinstance(obj, Series):\n s = SeriesWriter(\n obj, orient=orient, date_format=date_format,\n double_precision=double_precision, ensure_ascii=force_ascii,\n date_unit=date_unit, default_handler=default_handler).write()\n elif isinstance(obj, DataFrame):\n s = FrameWriter(\n obj, orient=orient, date_format=date_format,\n double_precision=double_precision, ensure_ascii=force_ascii,\n date_unit=date_unit, default_handler=default_handler).write()\n else:\n raise NotImplementedError(\"'obj' should be a Series or a DataFrame\")\n\n if isinstance(path_or_buf, compat.string_types):\n with open(path_or_buf, 'w') as fh:\n fh.write(s)\n elif path_or_buf is None:\n return s\n else:\n path_or_buf.write(s)\n\n\nclass Writer(object):\n\n def __init__(self, obj, orient, date_format, double_precision,\n ensure_ascii, date_unit, default_handler=None):\n self.obj = obj\n\n if orient is None:\n orient = self._default_orient\n\n self.orient = orient\n self.date_format = date_format\n self.double_precision = double_precision\n self.ensure_ascii = ensure_ascii\n self.date_unit = date_unit\n self.default_handler = default_handler\n\n self.is_copy = None\n self._format_axes()\n\n def _format_axes(self):\n raise AbstractMethodError(self)\n\n def write(self):\n return dumps(\n self.obj,\n orient=self.orient,\n double_precision=self.double_precision,\n ensure_ascii=self.ensure_ascii,\n date_unit=self.date_unit,\n iso_dates=self.date_format == 'iso',\n default_handler=self.default_handler)\n\n\nclass SeriesWriter(Writer):\n _default_orient = 'index'\n\n def _format_axes(self):\n if not self.obj.index.is_unique and self.orient == 'index':\n raise ValueError(\"Series index must be unique for orient=\"\n \"'%s'\" % self.orient)\n\n\nclass FrameWriter(Writer):\n _default_orient = 'columns'\n\n def _format_axes(self):\n \"\"\" try to axes if they are datelike \"\"\"\n if not self.obj.index.is_unique and self.orient in (\n 'index', 'columns'):\n raise ValueError(\"DataFrame index must be unique for orient=\"\n \"'%s'.\" % self.orient)\n if not self.obj.columns.is_unique and self.orient in (\n 'index', 'columns', 'records'):\n raise ValueError(\"DataFrame columns must be unique for orient=\"\n \"'%s'.\" % self.orient)\n\n\ndef read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,\n convert_axes=True, convert_dates=True, keep_default_dates=True,\n numpy=False, precise_float=False, date_unit=None):\n \"\"\"\n Convert a JSON string to pandas object\n\n Parameters\n ----------\n path_or_buf : a valid JSON string or file-like, default: None\n The string could be a URL. Valid URL schemes include http, ftp, s3, and\n file. For file URLs, a host is expected. 
For instance, a local file\n could be ``file://localhost/path/to/table.json``\n\n orient\n\n * `Series`\n\n - default is ``'index'``\n - allowed values are: ``{'split','records','index'}``\n - The Series index must be unique for orient ``'index'``.\n\n * `DataFrame`\n\n - default is ``'columns'``\n - allowed values are: {'split','records','index','columns','values'}\n - The DataFrame index must be unique for orients 'index' and\n 'columns'.\n - The DataFrame columns must be unique for orients 'index',\n 'columns', and 'records'.\n\n * The format of the JSON string\n\n - split : dict like\n ``{index -> [index], columns -> [columns], data -> [values]}``\n - records : list like\n ``[{column -> value}, ... , {column -> value}]``\n - index : dict like ``{index -> {column -> value}}``\n - columns : dict like ``{column -> {index -> value}}``\n - values : just the values array\n\n typ : type of object to recover (series or frame), default 'frame'\n dtype : boolean or dict, default True\n If True, infer dtypes, if a dict of column to dtype, then use those,\n if False, then don't infer dtypes at all, applies only to the data.\n convert_axes : boolean, default True\n Try to convert the axes to the proper dtypes.\n convert_dates : boolean, default True\n List of columns to parse for dates; If True, then try to parse\n datelike columns default is True; a column label is datelike if\n\n * it ends with ``'_at'``,\n\n * it ends with ``'_time'``,\n\n * it begins with ``'timestamp'``,\n\n * it is ``'modified'``, or\n\n * it is ``'date'``\n\n keep_default_dates : boolean, default True\n If parsing dates, then parse the default datelike columns\n numpy : boolean, default False\n Direct decoding to numpy arrays. Supports numeric data only, but\n non-numeric column and index labels are supported. Note also that the\n JSON ordering MUST be the same for each term if numpy=True.\n precise_float : boolean, default False\n Set to enable usage of higher precision (strtod) function when\n decoding string to double values. Default (False) is to use fast but\n less precise builtin functionality\n date_unit : string, default None\n The timestamp unit to detect if converting dates. 
The default behaviour\n is to try and detect the correct precision, but if this is not desired\n then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,\n milliseconds, microseconds or nanoseconds respectively.\n\n Returns\n -------\n result : Series or DataFrame\n \"\"\"\n\n filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf)\n if isinstance(filepath_or_buffer, compat.string_types):\n try:\n exists = os.path.exists(filepath_or_buffer)\n\n # if the filepath is too long will raise here\n # 5874\n except (TypeError, ValueError):\n exists = False\n\n if exists:\n with open(filepath_or_buffer, 'r') as fh:\n json = fh.read()\n else:\n json = filepath_or_buffer\n elif hasattr(filepath_or_buffer, 'read'):\n json = filepath_or_buffer.read()\n else:\n json = filepath_or_buffer\n\n obj = None\n if typ == 'frame':\n obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,\n keep_default_dates, numpy, precise_float,\n date_unit).parse()\n\n if typ == 'series' or obj is None:\n if not isinstance(dtype, bool):\n dtype = dict(data=dtype)\n obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,\n keep_default_dates, numpy, precise_float,\n date_unit).parse()\n\n return obj\n\n\nclass Parser(object):\n\n _STAMP_UNITS = ('s', 'ms', 'us', 'ns')\n _MIN_STAMPS = {\n 's': long(31536000),\n 'ms': long(31536000000),\n 'us': long(31536000000000),\n 'ns': long(31536000000000000)}\n\n def __init__(self, json, orient, dtype=True, convert_axes=True,\n convert_dates=True, keep_default_dates=False, numpy=False,\n precise_float=False, date_unit=None):\n self.json = json\n\n if orient is None:\n orient = self._default_orient\n\n self.orient = orient\n self.dtype = dtype\n\n if orient == \"split\":\n numpy = False\n\n if date_unit is not None:\n date_unit = date_unit.lower()\n if date_unit not in self._STAMP_UNITS:\n raise ValueError('date_unit must be one of %s' %\n (self._STAMP_UNITS,))\n self.min_stamp = self._MIN_STAMPS[date_unit]\n else:\n self.min_stamp = self._MIN_STAMPS['s']\n\n self.numpy = numpy\n self.precise_float = precise_float\n self.convert_axes = convert_axes\n self.convert_dates = convert_dates\n self.date_unit = date_unit\n self.keep_default_dates = keep_default_dates\n self.obj = None\n\n def check_keys_split(self, decoded):\n \"checks that dict has only the appropriate keys for orient='split'\"\n bad_keys = set(decoded.keys()).difference(set(self._split_keys))\n if bad_keys:\n bad_keys = \", \".join(bad_keys)\n raise ValueError(u(\"JSON data had unexpected key(s): %s\") %\n pprint_thing(bad_keys))\n\n def parse(self):\n\n # try numpy\n numpy = self.numpy\n if numpy:\n self._parse_numpy()\n\n else:\n self._parse_no_numpy()\n\n if self.obj is None:\n return None\n if self.convert_axes:\n self._convert_axes()\n self._try_convert_types()\n return self.obj\n\n def _convert_axes(self):\n \"\"\" try to convert axes \"\"\"\n for axis in self.obj._AXIS_NUMBERS.keys():\n new_axis, result = self._try_convert_data(\n axis, self.obj._get_axis(axis), use_dtypes=False,\n convert_dates=True)\n if result:\n setattr(self.obj, axis, new_axis)\n\n def _try_convert_types(self):\n raise AbstractMethodError(self)\n\n def _try_convert_data(self, name, data, use_dtypes=True,\n convert_dates=True):\n \"\"\" try to parse a ndarray like into a column by inferring dtype \"\"\"\n\n # don't try to coerce, unless a force conversion\n if use_dtypes:\n if self.dtype is False:\n return data, False\n elif self.dtype is True:\n pass\n\n else:\n\n # dtype to force\n dtype = 
(self.dtype.get(name)\n if isinstance(self.dtype, dict) else self.dtype)\n if dtype is not None:\n try:\n dtype = np.dtype(dtype)\n return data.astype(dtype), True\n except:\n return data, False\n\n if convert_dates:\n new_data, result = self._try_convert_to_date(data)\n if result:\n return new_data, True\n\n result = False\n\n if data.dtype == 'object':\n\n # try float\n try:\n data = data.astype('float64')\n result = True\n except:\n pass\n\n if data.dtype.kind == 'f':\n\n if data.dtype != 'float64':\n\n # coerce floats to 64\n try:\n data = data.astype('float64')\n result = True\n except:\n pass\n\n # do't coerce 0-len data\n if len(data) and (data.dtype == 'float' or data.dtype == 'object'):\n\n # coerce ints if we can\n try:\n new_data = data.astype('int64')\n if (new_data == data).all():\n data = new_data\n result = True\n except:\n pass\n\n # coerce ints to 64\n if data.dtype == 'int':\n\n # coerce floats to 64\n try:\n data = data.astype('int64')\n result = True\n except:\n pass\n\n return data, result\n\n def _try_convert_to_date(self, data):\n \"\"\" try to parse a ndarray like into a date column\n try to coerce object in epoch/iso formats and\n integer/float in epcoh formats, return a boolean if parsing\n was successful \"\"\"\n\n # no conversion on empty\n if not len(data):\n return data, False\n\n new_data = data\n if new_data.dtype == 'object':\n try:\n new_data = data.astype('int64')\n except:\n pass\n\n # ignore numbers that are out of range\n if issubclass(new_data.dtype.type, np.number):\n in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |\n (new_data.values == iNaT))\n if not in_range.all():\n return data, False\n\n date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS\n for date_unit in date_units:\n try:\n new_data = to_datetime(new_data, errors='raise',\n unit=date_unit)\n except ValueError:\n continue\n except:\n break\n return new_data, True\n return data, False\n\n def _try_convert_dates(self):\n raise AbstractMethodError(self)\n\n\nclass SeriesParser(Parser):\n _default_orient = 'index'\n _split_keys = ('name', 'index', 'data')\n\n def _parse_no_numpy(self):\n\n json = self.json\n orient = self.orient\n if orient == \"split\":\n decoded = dict((str(k), v)\n for k, v in compat.iteritems(loads(\n json,\n precise_float=self.precise_float)))\n self.check_keys_split(decoded)\n self.obj = Series(dtype=None, **decoded)\n else:\n self.obj = Series(\n loads(json, precise_float=self.precise_float), dtype=None)\n\n def _parse_numpy(self):\n\n json = self.json\n orient = self.orient\n if orient == \"split\":\n decoded = loads(json, dtype=None, numpy=True,\n precise_float=self.precise_float)\n decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))\n self.check_keys_split(decoded)\n self.obj = Series(**decoded)\n elif orient == \"columns\" or orient == \"index\":\n self.obj = Series(*loads(json, dtype=None, numpy=True,\n labelled=True,\n precise_float=self.precise_float))\n else:\n self.obj = Series(loads(json, dtype=None, numpy=True,\n precise_float=self.precise_float))\n\n def _try_convert_types(self):\n if self.obj is None:\n return\n obj, result = self._try_convert_data(\n 'data', self.obj, convert_dates=self.convert_dates)\n if result:\n self.obj = obj\n\n\nclass FrameParser(Parser):\n _default_orient = 'columns'\n _split_keys = ('columns', 'index', 'data')\n\n def _parse_numpy(self):\n\n json = self.json\n orient = self.orient\n\n if orient == \"columns\":\n args = loads(json, dtype=None, numpy=True, labelled=True,\n 
precise_float=self.precise_float)\n if args:\n args = (args[0].T, args[2], args[1])\n self.obj = DataFrame(*args)\n elif orient == \"split\":\n decoded = loads(json, dtype=None, numpy=True,\n precise_float=self.precise_float)\n decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))\n self.check_keys_split(decoded)\n self.obj = DataFrame(**decoded)\n elif orient == \"values\":\n self.obj = DataFrame(loads(json, dtype=None, numpy=True,\n precise_float=self.precise_float))\n else:\n self.obj = DataFrame(*loads(json, dtype=None, numpy=True,\n labelled=True,\n precise_float=self.precise_float))\n\n def _parse_no_numpy(self):\n\n json = self.json\n orient = self.orient\n\n if orient == \"columns\":\n self.obj = DataFrame(\n loads(json, precise_float=self.precise_float), dtype=None)\n elif orient == \"split\":\n decoded = dict((str(k), v)\n for k, v in compat.iteritems(loads(\n json,\n precise_float=self.precise_float)))\n self.check_keys_split(decoded)\n self.obj = DataFrame(dtype=None, **decoded)\n elif orient == \"index\":\n self.obj = DataFrame(\n loads(json, precise_float=self.precise_float), dtype=None).T\n else:\n self.obj = DataFrame(\n loads(json, precise_float=self.precise_float), dtype=None)\n\n def _process_converter(self, f, filt=None):\n \"\"\" take a conversion function and possibly recreate the frame \"\"\"\n\n if filt is None:\n filt = lambda col, c: True\n\n needs_new_obj = False\n new_obj = dict()\n for i, (col, c) in enumerate(self.obj.iteritems()):\n if filt(col, c):\n new_data, result = f(col, c)\n if result:\n c = new_data\n needs_new_obj = True\n new_obj[i] = c\n\n if needs_new_obj:\n\n # possibly handle dup columns\n new_obj = DataFrame(new_obj, index=self.obj.index)\n new_obj.columns = self.obj.columns\n self.obj = new_obj\n\n def _try_convert_types(self):\n if self.obj is None:\n return\n if self.convert_dates:\n self._try_convert_dates()\n\n self._process_converter(\n lambda col, c: self._try_convert_data(col, c, convert_dates=False))\n\n def _try_convert_dates(self):\n if self.obj is None:\n return\n\n # our columns to parse\n convert_dates = self.convert_dates\n if convert_dates is True:\n convert_dates = []\n convert_dates = set(convert_dates)\n\n def is_ok(col):\n \"\"\" return if this col is ok to try for a date parse \"\"\"\n if not isinstance(col, compat.string_types):\n return False\n\n col_lower = col.lower()\n if (col_lower.endswith('_at') or\n col_lower.endswith('_time') or\n col_lower == 'modified' or\n col_lower == 'date' or\n col_lower == 'datetime' or\n col_lower.startswith('timestamp')):\n return True\n return False\n\n self._process_converter(\n lambda col, c: self._try_convert_to_date(c),\n lambda col, c: ((self.keep_default_dates and is_ok(col)) or\n col in convert_dates))\n\n# ---------------------------------------------------------------------\n# JSON normalization routines\n\n\ndef nested_to_record(ds, prefix=\"\", level=0):\n \"\"\"a simplified json_normalize\n\n converts a nested dict into a flat dict (\"record\"), unlike json_normalize,\n it does not attempt to extract a subset of the data.\n\n Parameters\n ----------\n ds : dict or list of dicts\n prefix: the prefix, optional, default: \"\"\n level: the number of levels in the jason string, optional, default: 0\n\n Returns\n -------\n d - dict or list of dicts, matching `ds`\n\n Examples\n --------\n\n IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),\n nested=dict(e=dict(c=1,d=2),d=2)))\n Out[52]:\n {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1,\n 'nested.d': 2,\n 
'nested.e.c': 1,\n 'nested.e.d': 2}\n \"\"\"\n singleton = False\n if isinstance(ds, dict):\n ds = [ds]\n singleton = True\n\n new_ds = []\n for d in ds:\n\n new_d = copy.deepcopy(d)\n for k, v in d.items():\n # each key gets renamed with prefix\n if level == 0:\n newkey = str(k)\n else:\n newkey = prefix + '.' + str(k)\n\n # only dicts gets recurse-flattend\n # only at level>1 do we rename the rest of the keys\n if not isinstance(v, dict):\n if level != 0: # so we skip copying for top level, common case\n v = new_d.pop(k)\n new_d[newkey] = v\n continue\n else:\n v = new_d.pop(k)\n new_d.update(nested_to_record(v, newkey, level + 1))\n new_ds.append(new_d)\n\n if singleton:\n return new_ds[0]\n return new_ds\n\n\ndef json_normalize(data, record_path=None, meta=None,\n meta_prefix=None,\n record_prefix=None):\n \"\"\"\n \"Normalize\" semi-structured JSON data into a flat table\n\n Parameters\n ----------\n data : dict or list of dicts\n Unserialized JSON objects\n record_path : string or list of strings, default None\n Path in each object to list of records. If not passed, data will be\n assumed to be an array of records\n meta : list of paths (string or list of strings), default None\n Fields to use as metadata for each record in resulting table\n record_prefix : string, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n path to records is ['foo', 'bar']\n meta_prefix : string, default None\n\n Returns\n -------\n frame : DataFrame\n\n Examples\n --------\n\n >>> data = [{'state': 'Florida',\n ... 'shortname': 'FL',\n ... 'info': {\n ... 'governor': 'Rick Scott'\n ... },\n ... 'counties': [{'name': 'Dade', 'population': 12345},\n ... {'name': 'Broward', 'population': 40000},\n ... {'name': 'Palm Beach', 'population': 60000}]},\n ... {'state': 'Ohio',\n ... 'shortname': 'OH',\n ... 'info': {\n ... 'governor': 'John Kasich'\n ... },\n ... 'counties': [{'name': 'Summit', 'population': 1234},\n ... {'name': 'Cuyahoga', 'population': 1337}]}]\n >>> from pandas.io.json import json_normalize\n >>> result = json_normalize(data, 'counties', ['state', 'shortname',\n ... 
['info', 'governor']])\n >>> result\n name population info.governor state shortname\n 0 Dade 12345 Rick Scott Florida FL\n 1 Broward 40000 Rick Scott Florida FL\n 2 Palm Beach 60000 Rick Scott Florida FL\n 3 Summit 1234 John Kasich Ohio OH\n 4 Cuyahoga 1337 John Kasich Ohio OH\n\n \"\"\"\n def _pull_field(js, spec):\n result = js\n if isinstance(spec, list):\n for field in spec:\n result = result[field]\n else:\n result = result[spec]\n\n return result\n\n # A bit of a hackjob\n if isinstance(data, dict):\n data = [data]\n\n if record_path is None:\n if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):\n # naive normalization, this is idempotent for flat records\n # and potentially will inflate the data considerably for\n # deeply nested structures:\n # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}\n #\n # TODO: handle record value which are lists, at least error\n # reasonably\n data = nested_to_record(data)\n return DataFrame(data)\n elif not isinstance(record_path, list):\n record_path = [record_path]\n\n if meta is None:\n meta = []\n elif not isinstance(meta, list):\n meta = [meta]\n\n for i, x in enumerate(meta):\n if not isinstance(x, list):\n meta[i] = [x]\n\n # Disastrously inefficient for now\n records = []\n lengths = []\n\n meta_vals = defaultdict(list)\n meta_keys = ['.'.join(val) for val in meta]\n\n def _recursive_extract(data, path, seen_meta, level=0):\n if len(path) > 1:\n for obj in data:\n for val, key in zip(meta, meta_keys):\n if level + 1 == len(val):\n seen_meta[key] = _pull_field(obj, val[-1])\n\n _recursive_extract(obj[path[0]], path[1:],\n seen_meta, level=level + 1)\n else:\n for obj in data:\n recs = _pull_field(obj, path[0])\n\n # For repeating the metadata later\n lengths.append(len(recs))\n\n for val, key in zip(meta, meta_keys):\n if level + 1 > len(val):\n meta_val = seen_meta[key]\n else:\n meta_val = _pull_field(obj, val[level:])\n meta_vals[key].append(meta_val)\n\n records.extend(recs)\n\n _recursive_extract(data, record_path, {}, level=0)\n\n result = DataFrame(records)\n\n if record_prefix is not None:\n result.rename(columns=lambda x: record_prefix + x, inplace=True)\n\n # Data types, a problem\n for k, v in compat.iteritems(meta_vals):\n if meta_prefix is not None:\n k = meta_prefix + k\n\n if k in result:\n raise ValueError('Conflicting metadata name %s, '\n 'need distinguishing prefix ' % k)\n\n result[k] = np.array(v).repeat(lengths)\n\n return result\n" ]
[ [ "pandas.to_datetime", "pandas.Series", "pandas.compat.u", "pandas.isnull", "pandas.core.common.AbstractMethodError", "pandas.formats.printing.pprint_thing", "pandas.DataFrame", "numpy.dtype", "pandas.io.common.get_filepath_or_buffer", "pandas.compat.iteritems", "pandas.compat.itervalues", "numpy.array", "pandas.compat.long" ] ]
Kedo-Aleksei/statsmodels
[ "d88ef76d6c05f3c77b24500514d9e9c249429376" ]
[ "statsmodels/stats/multivariate.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 14:48:19 2017\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\"\"\"\n\nimport numpy as np\nfrom scipy import stats\n\nfrom statsmodels.stats.moment_helpers import cov2corr\nfrom statsmodels.stats.base import HolderTuple\nfrom statsmodels.tools.validation import array_like\n\n# shortcut function\nlogdet = lambda x: np.linalg.slogdet(x)[1] # noqa: E731\n\n\ndef test_mvmean(data, mean_null=0, return_results=True):\n \"\"\"Hotellings test for multivariate mean in one sample\n\n Parameters\n ----------\n data : array_like\n data with observations in rows and variables in columns\n mean_null : array_like\n mean of the multivariate data under the null hypothesis\n return_results : bool\n If true, then a results instance is returned. If False, then only\n the test statistic and pvalue are returned.\n\n Returns\n -------\n results : instance of a results class with attributes\n statistic, pvalue, t2 and df\n (statistic, pvalue) : tuple\n If return_results is false, then only the test statistic and the\n pvalue are returned.\n\n \"\"\"\n x = np.asarray(data)\n nobs, k_vars = x.shape\n mean = x.mean(0)\n cov = np.cov(x, rowvar=False, ddof=1)\n diff = mean - mean_null\n t2 = nobs * diff.dot(np.linalg.solve(cov, diff))\n factor = (nobs - 1) * k_vars / (nobs - k_vars)\n statistic = t2 / factor\n df = (k_vars, nobs - k_vars)\n pvalue = stats.f.sf(statistic, df[0], df[1])\n if return_results:\n res = HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n t2=t2,\n distr=\"F\")\n return res\n else:\n return statistic, pvalue\n\ndef test_mvmean_2indep(data1, data2):\n \"\"\"Hotellings test for multivariate mean in two samples\n\n Parameters\n ----------\n data1 : array_like\n first sample data with observations in rows and variables in columns\n data2 : array_like\n second sample data with observations in rows and variables in columns\n\n Returns\n -------\n results : instance of a results class with attributes\n statistic, pvalue, t2 and df\n \"\"\"\n x1 = array_like(data1, \"x1\", ndim=2)\n x2 = array_like(data2, \"x2\", ndim=2)\n nobs_x, k_vars = x1.shape\n nobs_y, k_vars = x2.shape\n mean_x = x1.mean(0)\n mean_y = x2.mean(0)\n cov_x = np.cov(x1, rowvar=False, ddof=1)\n cov_y = np.cov(x2, rowvar=False, ddof=1)\n nobs_t = nobs_x + nobs_y\n combined_cov = ((nobs_x - 1) * cov_x + (nobs_y - 1) * cov_y) / (nobs_t - 2)\n diff = mean_x - mean_y\n t2 = (nobs_x * nobs_y) / nobs_t * diff @ (np.linalg.solve(combined_cov, diff))\n factor = ((nobs_t - 2) * k_vars) / (nobs_t - k_vars - 1)\n statistic = t2 / factor\n df = (k_vars, nobs_t - 1 - k_vars)\n pvalue = stats.f.sf(statistic, df[0], df[1])\n return HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n t2=t2,\n distr=\"F\")\n\ndef confint_mvmean(data, lin_transf=None, alpha=0.5, simult=False):\n \"\"\"Confidence interval for linear transformation of a multivariate mean\n\n Either pointwise or simultaneous confidence intervals are returned.\n\n Parameters\n ----------\n data : array_like\n data with observations in rows and variables in columns\n lin_transf : array_like or None\n The linear transformation or contrast matrix for transforming the\n vector of means. 
If this is None, then the identity matrix is used\n which specifies the means themselves.\n alpha : float in (0, 1)\n confidence level for the confidence interval, commonly used is\n alpha=0.05.\n simult: bool\n If ``simult`` is False (default), then the pointwise confidence\n interval is returned.\n Otherwise, a simultaneous confidence interval is returned.\n Warning: additional simultaneous confidence intervals might be added\n and the default for those might change.\n\n Returns\n -------\n low : ndarray\n lower confidence bound on the linear transformed\n upp : ndarray\n upper confidence bound on the linear transformed\n values : ndarray\n mean or their linear transformation, center of the confidence region\n\n Notes\n -----\n Pointwise confidence interval is based on Johnson and Wichern\n equation (5-21) page 224.\n\n Simultaneous confidence interval is based on Johnson and Wichern\n Result 5.3 page 225.\n This looks like Sheffe simultaneous confidence intervals.\n\n Bonferroni corrected simultaneous confidence interval might be added in\n future\n\n References\n ----------\n Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate\n Statistical Analysis. 6th ed. Upper Saddle River, N.J: Pearson Prentice\n Hall.\n \"\"\"\n x = np.asarray(data)\n nobs, k_vars = x.shape\n if lin_transf is None:\n lin_transf = np.eye(k_vars)\n mean = x.mean(0)\n cov = np.cov(x, rowvar=False, ddof=0)\n\n ci = confint_mvmean_fromstats(mean, cov, nobs, lin_transf=lin_transf,\n alpha=alpha, simult=simult)\n return ci\n\n\ndef confint_mvmean_fromstats(mean, cov, nobs, lin_transf=None, alpha=0.05,\n simult=False):\n \"\"\"Confidence interval for linear transformation of a multivariate mean\n\n Either pointwise or simultaneous conficence intervals are returned.\n Data is provided in the form of summary statistics, mean, cov, nobs.\n\n Parameters\n ----------\n mean : ndarray\n cov : ndarray\n nobs : int\n lin_transf : array_like or None\n The linear transformation or contrast matrix for transforming the\n vector of means. If this is None, then the identity matrix is used\n which specifies the means themselves.\n alpha : float in (0, 1)\n confidence level for the confidence interval, commonly used is\n alpha=0.05.\n simult: bool\n If simult is False (default), then pointwise confidence interval is\n returned.\n Otherwise, a simultaneous confidence interval is returned.\n Warning: additional simultaneous confidence intervals might be added\n and the default for those might change.\n\n Notes\n -----\n Pointwise confidence interval is based on Johnson and Wichern\n equation (5-21) page 224.\n\n Simultaneous confidence interval is based on Johnson and Wichern\n Result 5.3 page 225.\n This looks like Sheffe simultaneous confidence intervals.\n\n Bonferroni corrected simultaneous confidence interval might be added in\n future\n\n References\n ----------\n Johnson, Richard A., and Dean W. Wichern. 2007. Applied Multivariate\n Statistical Analysis. 6th ed. 
Upper Saddle River, N.J: Pearson Prentice\n Hall.\n\n \"\"\"\n mean = np.asarray(mean)\n cov = np.asarray(cov)\n c = np.atleast_2d(lin_transf)\n k_vars = len(mean)\n\n if simult is False:\n values = c.dot(mean)\n quad_form = (c * cov.dot(c.T).T).sum(1)\n df = nobs - 1\n t_critval = stats.t.isf(alpha / 2, df)\n ci_diff = np.sqrt(quad_form / df) * t_critval\n low = values - ci_diff\n upp = values + ci_diff\n else:\n values = c.dot(mean)\n quad_form = (c * cov.dot(c.T).T).sum(1)\n factor = (nobs - 1) * k_vars / (nobs - k_vars) / nobs\n df = (k_vars, nobs - k_vars)\n f_critval = stats.f.isf(alpha, df[0], df[1])\n ci_diff = np.sqrt(factor * quad_form * f_critval)\n low = values - ci_diff\n upp = values + ci_diff\n\n return low, upp, values # , (f_critval, factor, quad_form, df)\n\n\n\"\"\"\nCreated on Tue Nov 7 13:22:44 2017\n\nAuthor: Josef Perktold\n\n\nReferences\n----------\nStata manual for mvtest covariances\nRencher and Christensen 2012\nBartlett 1954\n\nStata refers to Rencher and Christensen for the formulas. Those correspond\nto the formula collection in Bartlett 1954 for several of them.\n\n\n\"\"\" # pylint: disable=W0105\n\n\ndef cov_test(cov, nobs, cov_null):\n \"\"\"One sample hypothesis test for covariance equal to null covariance\n\n The Null hypothesis is that cov = cov_null, against the alternative that\n it is not equal to cov_null\n\n Parameters\n ----------\n cov : array_like\n Covariance matrix of the data, estimated with denominator ``(N - 1)``,\n i.e. `ddof=1`.\n nobs : int\n number of observations used in the estimation of the covariance\n cov_null : nd_array\n covariance under the null hypothesis\n\n Returns\n -------\n res : instance of HolderTuple\n results with ``statistic, pvalue`` and other attributes like ``df``\n\n References\n ----------\n Bartlett, M. S. 1954. “A Note on the Multiplying Factors for Various Χ2\n Approximations.” Journal of the Royal Statistical Society. Series B\n (Methodological) 16 (2): 296–98.\n\n Rencher, Alvin C., and William F. Christensen. 2012. Methods of\n Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and\n Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.\n https://doi.org/10.1002/9781118391686.\n\n StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.\n Stata Press Publication.\n\n \"\"\"\n # using Stata formulas where cov_sample use nobs in denominator\n # Bartlett 1954 has fewer terms\n\n S = np.asarray(cov) * (nobs - 1) / nobs\n S0 = np.asarray(cov_null)\n k = cov.shape[0]\n n = nobs\n\n fact = nobs - 1.\n fact *= 1 - (2 * k + 1 - 2 / (k + 1)) / (6 * (n - 1) - 1)\n fact2 = logdet(S0) - logdet(n / (n - 1) * S)\n fact2 += np.trace(n / (n - 1) * np.linalg.solve(S0, S)) - k\n statistic = fact * fact2\n df = k * (k + 1) / 2\n pvalue = stats.chi2.sf(statistic, df)\n return HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n distr=\"chi2\",\n null=\"equal value\",\n cov_null=cov_null\n )\n\n\ndef cov_test_spherical(cov, nobs):\n r\"\"\"One sample hypothesis test that covariance matrix is spherical\n\n The Null and alternative hypotheses are\n\n $H0 : \\Sigma = \\sigma I \\\\\n H1 : \\Sigma \\neq \\sigma I$\n\n where $\\sigma$ is the common variances with unspecified value.\n\n Parameters\n ----------\n cov : array_like\n Covariance matrix of the data, estimated with denominator ``(N - 1)``,\n i.e. 
`ddof=1`.\n nobs : int\n number of observations used in the estimation of the covariance\n\n Returns\n -------\n res : instance of HolderTuple\n results with ``statistic, pvalue`` and other attributes like ``df``\n\n References\n ----------\n Bartlett, M. S. 1954. “A Note on the Multiplying Factors for Various Χ2\n Approximations.” Journal of the Royal Statistical Society. Series B\n (Methodological) 16 (2): 296–98.\n\n Rencher, Alvin C., and William F. Christensen. 2012. Methods of\n Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and\n Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.\n https://doi.org/10.1002/9781118391686.\n\n StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.\n Stata Press Publication.\n \"\"\"\n\n # unchanged Stata formula, but denom is cov cancels, AFAICS\n # Bartlett 1954 correction factor in IIIc\n cov = np.asarray(cov)\n k = cov.shape[0]\n\n statistic = nobs - 1 - (2 * k**2 + k + 2) / (6 * k)\n statistic *= k * np.log(np.trace(cov)) - logdet(cov) - k * np.log(k)\n df = k * (k + 1) / 2 - 1\n pvalue = stats.chi2.sf(statistic, df)\n return HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n distr=\"chi2\",\n null=\"spherical\"\n )\n\n\ndef cov_test_diagonal(cov, nobs):\n r\"\"\"One sample hypothesis test that covariance matrix is diagonal matrix.\n\n The Null and alternative hypotheses are\n\n $H0 : \\Sigma = diag(\\sigma_i) \\\\\n H1 : \\Sigma \\neq diag(\\sigma_i)$\n\n where $\\sigma_i$ are the variances with unspecified values.\n\n Parameters\n ----------\n cov : array_like\n Covariance matrix of the data, estimated with denominator ``(N - 1)``,\n i.e. `ddof=1`.\n nobs : int\n number of observations used in the estimation of the covariance\n\n Returns\n -------\n res : instance of HolderTuple\n results with ``statistic, pvalue`` and other attributes like ``df``\n\n References\n ----------\n Rencher, Alvin C., and William F. Christensen. 2012. Methods of\n Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and\n Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.\n https://doi.org/10.1002/9781118391686.\n\n StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.\n Stata Press Publication.\n \"\"\"\n cov = np.asarray(cov)\n k = cov.shape[0]\n R = cov2corr(cov)\n\n statistic = -(nobs - 1 - (2 * k + 5) / 6) * logdet(R)\n df = k * (k - 1) / 2\n pvalue = stats.chi2.sf(statistic, df)\n return HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n distr=\"chi2\",\n null=\"diagonal\"\n )\n\n\ndef _get_blocks(mat, block_len):\n \"\"\"get diagonal blocks from matrix\n \"\"\"\n k = len(mat)\n idx = np.cumsum(block_len)\n if idx[-1] == k:\n idx = idx[:-1]\n elif idx[-1] > k:\n raise ValueError(\"sum of block_len larger than shape of mat\")\n else:\n # allow one missing block that is the remainder\n pass\n idx_blocks = np.split(np.arange(k), idx)\n blocks = []\n for ii in idx_blocks:\n blocks.append(mat[ii[:, None], ii])\n return blocks, idx_blocks\n\n\ndef cov_test_blockdiagonal(cov, nobs, block_len):\n r\"\"\"One sample hypothesis test that covariance is block diagonal.\n\n The Null and alternative hypotheses are\n\n $H0 : \\Sigma = diag(\\Sigma_i) \\\\\n H1 : \\Sigma \\neq diag(\\Sigma_i)$\n\n where $\\Sigma_i$ are covariance blocks with unspecified values.\n\n Parameters\n ----------\n cov : array_like\n Covariance matrix of the data, estimated with denominator ``(N - 1)``,\n i.e. 
`ddof=1`.\n nobs : int\n number of observations used in the estimation of the covariance\n block_len : list\n list of length of each square block\n\n Returns\n -------\n res : instance of HolderTuple\n results with ``statistic, pvalue`` and other attributes like ``df``\n\n References\n ----------\n Rencher, Alvin C., and William F. Christensen. 2012. Methods of\n Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and\n Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.\n https://doi.org/10.1002/9781118391686.\n\n StataCorp, L. P. Stata Multivariate Statistics: Reference Manual.\n Stata Press Publication.\n \"\"\"\n cov = np.asarray(cov)\n cov_blocks = _get_blocks(cov, block_len)[0]\n k = cov.shape[0]\n k_blocks = [c.shape[0] for c in cov_blocks]\n if k != sum(k_blocks):\n msg = \"sample covariances and blocks do not have matching shape\"\n raise ValueError(msg)\n logdet_blocks = sum(logdet(c) for c in cov_blocks)\n a2 = k**2 - sum(ki**2 for ki in k_blocks)\n a3 = k**3 - sum(ki**3 for ki in k_blocks)\n\n statistic = (nobs - 1 - (2 * a3 + 3 * a2) / (6. * a2))\n statistic *= logdet_blocks - logdet(cov)\n\n df = a2 / 2\n pvalue = stats.chi2.sf(statistic, df)\n return HolderTuple(statistic=statistic,\n pvalue=pvalue,\n df=df,\n distr=\"chi2\",\n null=\"block-diagonal\"\n )\n\n\ndef cov_test_oneway(cov_list, nobs_list):\n r\"\"\"Multiple sample hypothesis test that covariance matrices are equal.\n\n This is commonly known as Box-M test\n\n The Null and alternative hypotheses are\n\n $H0 : \\Sigma_i = \\Sigma_j for all i and j \\\\\n H1 : \\Sigma_i \\neq diag(\\Sigma_j) for at least one i and j$\n\n where $\\Sigma_i$ is the covariance of sample $i$.\n\n Parameters\n ----------\n cov_list : list of array_like\n Covariance matrices of the sample, estimated with denominator\n ``(N - 1)``, i.e. `ddof=1`.\n nobs_list : list\n List of the number of observations used in the estimation of the\n covariance for each sample.\n\n Returns\n -------\n res : instance of HolderTuple\n Results contains test statistic and pvalues for both chisquare and F\n distribution based tests, identified by the name ending \"_chi2\" and\n \"_f\".\n Attributes ``statistic, pvalue`` refer to the F-test version.\n\n Notes\n -----\n approximations to distribution of test statistic is by Box\n\n References\n ----------\n Rencher, Alvin C., and William F. Christensen. 2012. Methods of\n Multivariate Analysis: Rencher/Methods. Wiley Series in Probability and\n Statistics. Hoboken, NJ, USA: John Wiley & Sons, Inc.\n https://doi.org/10.1002/9781118391686.\n\n StataCorp, L. P. 
Stata Multivariate Statistics: Reference Manual.\n Stata Press Publication.\n \"\"\"\n # Note stata uses nobs in cov, this uses nobs - 1\n cov_list = list(map(np.asarray, cov_list))\n m = len(cov_list)\n nobs = sum(nobs_list) # total number of observations\n k = cov_list[0].shape[0]\n\n cov_pooled = sum((n - 1) * c for (n, c) in zip(nobs_list, cov_list))\n cov_pooled /= (nobs - m)\n stat0 = (nobs - m) * logdet(cov_pooled)\n stat0 -= sum((n - 1) * logdet(c) for (n, c) in zip(nobs_list, cov_list))\n\n # Box's chi2\n c1 = sum(1 / (n - 1) for n in nobs_list) - 1 / (nobs - m)\n c1 *= (2 * k*k + 3 * k - 1) / (6 * (k + 1) * (m - 1))\n df_chi2 = (m - 1) * k * (k + 1) / 2\n statistic_chi2 = (1 - c1) * stat0\n pvalue_chi2 = stats.chi2.sf(statistic_chi2, df_chi2)\n\n c2 = sum(1 / (n - 1)**2 for n in nobs_list) - 1 / (nobs - m)**2\n c2 *= (k - 1) * (k + 2) / (6 * (m - 1))\n a1 = df_chi2\n a2 = (a1 + 2) / abs(c2 - c1**2)\n b1 = (1 - c1 - a1 / a2) / a1\n b2 = (1 - c1 + 2 / a2) / a2\n if c2 > c1**2:\n statistic_f = b1 * stat0\n else:\n tmp = b2 * stat0\n statistic_f = a2 / a1 * tmp / (1 + tmp)\n print(\"in branch 2\")\n df_f = (a1, a2)\n pvalue_f = stats.f.sf(statistic_f, *df_f)\n return HolderTuple(statistic=statistic_f, # name convention, using F here\n pvalue=pvalue_f, # name convention, using F here\n statistic_base=stat0,\n statistic_chi2=statistic_chi2,\n pvalue_chi2=pvalue_chi2,\n df_chi2=df_chi2,\n distr_chi2='chi2',\n statistic_f=statistic_f,\n pvalue_f=pvalue_f,\n df_f=df_f,\n distr_f='F')\n" ]
[ [ "numpy.log", "numpy.linalg.solve", "numpy.sqrt", "numpy.asarray", "scipy.stats.chi2.sf", "numpy.linalg.slogdet", "numpy.eye", "numpy.arange", "numpy.cumsum", "numpy.trace", "scipy.stats.t.isf", "numpy.atleast_2d", "numpy.cov", "scipy.stats.f.isf", "scipy.stats.f.sf" ] ]
Nodmgatall/pytorch-A3C
[ "ea418a2677c19350e9b684f9c13725f3f280bb7b" ]
[ "plot.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport re\n\narr = []\nshortForms = [\"gu\", \"g\",\"me\",\"mes\",\"wc\"]\nif len(sys.argv) == 1:\n arr = files;\n\nelif sys.argv[1].isdigit():\n arr = sys.argv[2:]\nelse:\n arr = sys.argv[1:]\n\nfor x in arr:\n n = [float(s) for s in re.findall(r'-?\\d+\\.?\\d*', x)]\n res = np.load(x)\n res = res / n[-2]\n _l = [str(x[0]) + \"=\"+str(x[1]) for x in zip( shortForms,n)]\n del _l[2]\n plt.plot(res,label=\" \".join(_l))\n#plt.axis([ 0, 10000,-10,0])\nplt.legend(bbox_to_anchor=(1,1), loc=\"lower center\")\nplt.ylabel('Moving average ep reward')\nplt.xlabel('Step')\nplt.tight_layout() \nplt.savefig(\"outSingle.png\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.load", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
sharksmhi/sharkpylib
[ "2a1d3cf3c15729e50525ab8da5920b6f9bb3faf2" ]
[ "sharkpylib/ices/ices.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute\n# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).\n\"\"\"\nCreated on Thu Aug 30 15:30:28 2018\n\n@author:\n\"\"\"\n\nimport os\nimport codecs\nimport datetime\n\ntry:\n import pandas as pd\nexcept:\n pass\n\nfrom .. import mappinglib as mapping\n\n\nclass CodeList(dict): \n def __init__(self, file_path=False): \n if not file_path:\n file_path = os.path.dirname(os.path.abspath(__file__)) + '/ices_codelist.txt' \n \n self.file_path = file_path\n self.df = pd.read_csv(file_path, sep='\\t', encoding='cp1252', dtype=str) \n\n\n #==========================================================================\n def get_mapping_dict(self, f='ices_code', t='ices_description'): \n return dict(zip(self.df[f], self.df[t])) \n\n\n\n#==============================================================================\n#==============================================================================\nclass ImportMapping(dict): \n def __init__(self, file_path=False): \n if not file_path:\n file_path = os.path.dirname(os.path.abspath(__file__)) + '/ices_import_contaminants.txt' \n \n self.file_path = file_path\n self.df = pd.read_csv(file_path, sep='\\t', encoding='cp1252', dtype=str)\n self.df.fillna('', inplace=True)\n self.df['column'] = self.df['column'].apply(self._convert_to_int)\n self.df['parent_code'] = self.df['parent_code'].apply(self._convert_to_double_digit_string)\n self.df['ices_code'] = self.df['ices_code'].apply(self._convert_to_double_digit_string) \n \n \n #==========================================================================\n def _convert_to_int(self, x):\n try:\n x = int(x)\n except:\n pass\n return x\n \n \n #==========================================================================\n def _convert_to_double_digit_string(self, x):\n try:\n x = int(x)\n x = str(x).rjust(2, '0')\n except:\n pass\n return x\n \n \n #==========================================================================\n def is_parent(self, item): \n \"\"\"\n Checks if item is present in the parent_code-column. 
\n \"\"\"\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False\n \n \n #==========================================================================\n def get_internal_name_to_column_dict(self, parent=False): \n if parent:\n df = self.df.loc[self.df['parent_code']==parent, :] \n return dict(zip(df['internal_name'], df['column'])) \n else: \n return_dict = {}\n for parent_code in set(self.df['parent_code'].values):\n if not parent_code:\n continue\n df = self.df.loc[self.df['parent_code']==parent_code, :] \n return_dict[parent_code] = dict(zip(df['internal_name'], df['column']))\n return return_dict \n \n \n #==========================================================================\n def get_mapping_dict(self, f='ices_code', t='internal_name'): \n return dict(zip(self.df[f], self.df[t]))\n \n \n #==========================================================================\n def get_parameter_list(self):\n return list(self.df.loc[self.df['parent_code'] != '', 'internal_name'])\n \n\n\n#==============================================================================\n#==============================================================================\nclass ICEScontaminants(): \n def __init__(self, file_path, sep=',', **kwargs): \n self.import_mapping = ImportMapping(file_path=kwargs.get('ices_import_mapping_file_path'))\n self.codelist = CodeList(file_path=kwargs.get('ices_codelist_file_path'))\n \n self.mapping_parents = self.import_mapping.get_internal_name_to_column_dict() \n self.mapping_label = self.codelist.get_mapping_dict()\n \n self.parameter_list = self.import_mapping.get_parameter_list() \n \n \n \n self.data_matrix = []\n if type(file_path) == str:\n self._add_file(file_path, sep=sep, **kwargs)\n else:\n for f in file_path:\n self._add_file(f, sep=sep, **kwargs)\n\n self.row_df = pd.DataFrame(self.data_matrix, columns=self.parameter_list)\n \n # Convert columns\n self.row_df['SDATE'] = self.row_df['SDATE'].apply(mapping.split_date)\n self.row_df['LATIT'] = self.row_df['LATIT'].apply(mapping.strip_position)\n self.row_df['LONGI'] = self.row_df['LONGI'].apply(mapping.strip_position)\n\n self.row_df['time'] = pd.to_datetime(self.row_df['SDATE'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d')))\n\n # Add source column\n self.row_df['source'] = 'ices'\n \n self._add_id_column(**kwargs)\n \n print('\\n== DONE ==')\n \n \n #==========================================================================\n def _add_id_column(self, **kwargs):\n if kwargs.get('include_time_in_id', True):\n self.row_df['id'] = self.row_df['SDATE'].astype(str) + '_' + \\\n self.row_df['STIME'].astype(str) + '_' + \\\n self.row_df['LATIT'].apply(lambda x: x[:kwargs.get('id_pos_precision', 6)]).astype(str) + '_' + \\\n self.row_df['LONGI'].apply(lambda x: x[:kwargs.get('id_pos_precision', 6)]).astype(str)\n\n self.row_df['station_id'] = self.row_df['SDATE'].astype(str) + '_' + \\\n self.row_df['STIME'].astype(str) + '_' + \\\n self.row_df['STATN']\n else:\n self.row_df['id'] = self.row_df['SDATE'].astype(str) + '_' + \\\n self.row_df['LATIT'].apply(lambda x: x[:kwargs.get('id_pos_precision', 6)]).astype(str) + '_' + \\\n self.row_df['LONGI'].apply(lambda x: x[:kwargs.get('id_pos_precision', 6)]).astype(str)\n\n self.row_df['station_id'] = self.row_df['SDATE'].astype(str) + '_' + \\\n self.row_df['STATN']\n\n\n #==========================================================================\n def _add_file(self, file_path, sep=',', **kwargs): \n self.line_data = {}\n # print('utf8')\n with 
codecs.open(file_path, encoding='utf8') as fid:\n for line in fid: \n if not line.strip():\n continue \n split_line = line.split(sep)\n code = split_line[0] \n \n # Check if code is a parent code. If so, information should be retrieved from the row \n if self.mapping_parents.get(code): \n mapping_dict = self.mapping_parents.get(code)\n for item, col in mapping_dict.items():\n value = split_line[col]\n self.line_data[item] = self.mapping_label.get(value, value)\n \n # Save row \n if code == '10': \n self.data_matrix.append([self.line_data[item] for item in self.parameter_list]) \n print('File loaded: {}'.format(file_path))\n \n \n #==========================================================================\n def save_data(self, file_path):\n self.row_df.to_csv(file_path, sep='\\t', encoding='cp1252', index=False)\n \n \n\n " ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
kurazu/advent_of_code_2021
[ "a4b18e0e7f286d3485d85f2a1a58c7bdea0115d7" ]
[ "advent/day_13/task_1.py" ]
[ "import enum\nimport itertools\nimport logging\nimport re\nfrom typing import Callable, Dict, Iterable, List, Optional, TextIO, Tuple\n\nimport numpy as np\nimport numpy.typing as npt\n\nfrom ..cli import run_with_file_argument\nfrom ..io_utils import get_lines\n\nlogger = logging.getLogger(__name__)\nPATTERN = re.compile(r\"^fold along (?P<axis>[xy])=(?P<value>\\d+)$\")\n\n\nclass FoldAxis(enum.Enum):\n FOLD_UP = \"y\"\n FOLD_LEFT = \"x\"\n\n\ndef get_paper(input: TextIO) -> npt.NDArray[bool]:\n stripped_lines = map(str.strip, input)\n point_x: List[int] = []\n point_y: List[int] = []\n for line in stripped_lines:\n if not line:\n break # the dots sections is finished\n x, y = map(int, line.split(\",\"))\n point_x.append(x)\n point_y.append(y)\n xs = np.array(point_x)\n ys = np.array(point_y)\n del point_x\n del point_y\n width = np.max(xs)\n height = np.max(ys)\n paper = np.zeros((height + 1, width + 1), dtype=bool)\n paper[[ys, xs]] = True\n return paper\n\n\ndef get_folds(input: TextIO) -> Iterable[Tuple[FoldAxis, int]]:\n for line in get_lines(input):\n match = PATTERN.match(line)\n assert match is not None\n axis = FoldAxis(match.group(\"axis\"))\n value = int(match.group(\"value\"))\n yield axis, value\n\n\nbool_array_to_string = np.vectorize({False: \".\", True: \"#\"}.__getitem__)\n\n\ndef format_paper(paper: npt.NDArray[bool]) -> str:\n string_array = bool_array_to_string(paper)\n return \"\\n\".join(map(\"\".join, string_array))\n\n\ndef fold_left(paper: npt.NDArray[bool], fold_line: int) -> npt.NDArray[bool]:\n # split along the verical line\n leftside = paper[:, :fold_line]\n rightside = paper[:, fold_line + 1 :]\n # flip the right side left-to-right\n flipped_rightside = rightside[:, ::-1]\n # create a new canvas the size of the bigger part\n height, leftside_width = leftside.shape\n height, rightside_width = flipped_rightside.shape\n canvas = np.zeros((height, max(leftside_width, rightside_width)), dtype=bool)\n # paint both leftside and flipped rightside onto the canvas\n # anchoring both at the canvas right\n canvas[:, -leftside_width:] |= leftside\n canvas[:, -rightside_width:] |= flipped_rightside\n return canvas\n\n\ndef fold_up(paper: npt.NDArray[bool], fold_line: int) -> npt.NDArray[bool]:\n # split along the horizontal line\n upside = paper[:fold_line, :]\n downside = paper[fold_line + 1 :, :]\n # flip the bottom part upside-down\n flipped_downside = downside[::-1, :]\n # create a new canvas the size of the bigger part\n upside_height, width = upside.shape\n downside_height = len(flipped_downside)\n canvas = np.zeros((max(upside_height, downside_height), width), dtype=bool)\n # paint both upside and flipped downside onto the canvas\n # anchoring both at the canvas bottom\n canvas[-upside_height:, :] |= upside\n canvas[-downside_height:, :] |= flipped_downside\n return canvas\n\n\nFOLDING_MAP: Dict[FoldAxis, Callable[[npt.NDArray[bool], int], npt.NDArray[bool]]] = {\n FoldAxis.FOLD_LEFT: fold_left,\n FoldAxis.FOLD_UP: fold_up,\n}\n\n\ndef main(input: TextIO) -> str:\n paper = get_paper(input)\n logger.info(\"Initial paper\\n%s\", format_paper(paper))\n # not now analyze only the first fold\n folds = get_folds(input)\n folds = itertools.islice(folds, 1)\n for axis, value in folds:\n folding_callback = FOLDING_MAP[axis]\n paper = folding_callback(paper, value)\n logger.info(\"After folding %s at %s\\n%s\", axis.name, value, format_paper(paper))\n\n number_of_points = np.sum(paper > 0)\n\n return f\"{number_of_points}\"\n\n\nif __name__ == \"__main__\":\n 
run_with_file_argument(main)\n" ]
[ [ "numpy.max", "numpy.vectorize", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
su-khi/pandas
[ "ffe312c4beb3ca09c1fb45cf727d7c17e276e8fd" ]
[ "pandas/core/resample.py" ]
[ "from datetime import timedelta\nimport numpy as np\nimport warnings\nimport copy\nfrom textwrap import dedent\n\nimport pandas as pd\nfrom pandas.core.groupby.base import GroupByMixin\nfrom pandas.core.groupby.ops import BinGrouper\nfrom pandas.core.groupby.groupby import (\n _GroupBy, GroupBy, groupby, _pipe_template\n)\nfrom pandas.core.groupby.grouper import Grouper\nfrom pandas.core.groupby.generic import SeriesGroupBy, PanelGroupBy\n\nfrom pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\nfrom pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds\nfrom pandas.core.indexes.period import PeriodIndex\nfrom pandas.errors import AbstractMethodError\nimport pandas.core.algorithms as algos\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\n\nimport pandas.compat as compat\nfrom pandas.compat.numpy import function as nv\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import Timestamp, NaT\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\n\nfrom pandas.util._decorators import Appender, Substitution\nfrom pandas.core.generic import _shared_docs\n_shared_docs_kwargs = dict()\n\n\nclass Resampler(_GroupBy):\n\n \"\"\"\n Class for resampling datetimelike data, a groupby-like operation.\n See aggregate, transform, and apply functions on this object.\n\n It's easiest to use obj.resample(...) to use Resampler.\n\n Parameters\n ----------\n obj : pandas object\n groupby : a TimeGrouper object\n axis : int, default 0\n kind : str or None\n 'period', 'timestamp' to override default index treatement\n\n Notes\n -----\n After resampling, see aggregate, apply, and transform functions.\n\n Returns\n -------\n a Resampler of the appropriate type\n \"\"\"\n\n # to the groupby descriptor\n _attributes = ['freq', 'axis', 'closed', 'label', 'convention',\n 'loffset', 'base', 'kind']\n\n def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):\n self.groupby = groupby\n self.keys = None\n self.sort = True\n self.axis = axis\n self.kind = kind\n self.squeeze = False\n self.group_keys = True\n self.as_index = True\n self.exclusions = set()\n self.binner = None\n self.grouper = None\n\n if self.groupby is not None:\n self.groupby._set_grouper(self._convert_obj(obj), sort=True)\n\n def __unicode__(self):\n \"\"\" provide a nice str repr of our rolling object \"\"\"\n attrs = [\"{k}={v}\".format(k=k, v=getattr(self.groupby, k))\n for k in self._attributes if\n getattr(self.groupby, k, None) is not None]\n return \"{klass} [{attrs}]\".format(klass=self.__class__.__name__,\n attrs=', '.join(attrs))\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self._attributes:\n return getattr(self.groupby, attr)\n if attr in self.obj:\n return self[attr]\n\n return object.__getattribute__(self, attr)\n\n @property\n def obj(self):\n return self.groupby.obj\n\n @property\n def ax(self):\n return self.groupby.ax\n\n @property\n def _typ(self):\n \"\"\" masquerade for compat as a Series or a DataFrame \"\"\"\n if isinstance(self._selected_obj, pd.Series):\n return 'series'\n return 'dataframe'\n\n @property\n def _from_selection(self):\n \"\"\" is the resampling from a DataFrame column or MultiIndex level \"\"\"\n # upsampling and PeriodIndex resampling do not work\n # with selection, this state used to catch and raise an error\n 
return (self.groupby is not None and\n (self.groupby.key is not None or\n self.groupby.level is not None))\n\n def _convert_obj(self, obj):\n \"\"\"\n provide any conversions for the object in order to correctly handle\n\n Parameters\n ----------\n obj : the object to be resampled\n\n Returns\n -------\n obj : converted object\n \"\"\"\n obj = obj._consolidate()\n return obj\n\n def _get_binner_for_time(self):\n raise AbstractMethodError(self)\n\n def _set_binner(self):\n \"\"\"\n setup our binners\n cache these as we are an immutable object\n \"\"\"\n\n if self.binner is None:\n self.binner, self.grouper = self._get_binner()\n\n def _get_binner(self):\n \"\"\"\n create the BinGrouper, assume that self.set_grouper(obj)\n has already been called\n \"\"\"\n\n binner, bins, binlabels = self._get_binner_for_time()\n bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)\n return binner, bin_grouper\n\n def _assure_grouper(self):\n \"\"\" make sure that we are creating our binner & grouper \"\"\"\n self._set_binner()\n\n @Substitution(klass='Resampler',\n versionadded='.. versionadded:: 0.23.0',\n examples=\"\"\"\n>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},\n... index=pd.date_range('2012-08-02', periods=4))\n>>> df\n A\n2012-08-02 1\n2012-08-03 2\n2012-08-04 3\n2012-08-05 4\n\nTo get the difference between each 2-day period's maximum and minimum value in\none pass, you can do\n\n>>> df.resample('2D').pipe(lambda x: x.max() - x.min())\n A\n2012-08-02 1\n2012-08-04 1\"\"\")\n @Appender(_pipe_template)\n def pipe(self, func, *args, **kwargs):\n return super(Resampler, self).pipe(func, *args, **kwargs)\n\n _agg_doc = dedent(\"\"\"\n\n Examples\n --------\n >>> s = pd.Series([1,2,3,4,5],\n index=pd.date_range('20130101', periods=5,freq='s'))\n 2013-01-01 00:00:00 1\n 2013-01-01 00:00:01 2\n 2013-01-01 00:00:02 3\n 2013-01-01 00:00:03 4\n 2013-01-01 00:00:04 5\n Freq: S, dtype: int64\n\n >>> r = s.resample('2s')\n DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,\n label=left, convention=start, base=0]\n\n >>> r.agg(np.sum)\n 2013-01-01 00:00:00 3\n 2013-01-01 00:00:02 7\n 2013-01-01 00:00:04 5\n Freq: 2S, dtype: int64\n\n >>> r.agg(['sum','mean','max'])\n sum mean max\n 2013-01-01 00:00:00 3 1.5 2\n 2013-01-01 00:00:02 7 3.5 4\n 2013-01-01 00:00:04 5 5.0 5\n\n >>> r.agg({'result' : lambda x: x.mean() / x.std(),\n 'total' : np.sum})\n total result\n 2013-01-01 00:00:00 3 2.121320\n 2013-01-01 00:00:02 7 4.949747\n 2013-01-01 00:00:04 5 NaN\n\n See also\n --------\n pandas.DataFrame.groupby.aggregate\n pandas.DataFrame.resample.transform\n pandas.DataFrame.aggregate\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n klass='DataFrame',\n versionadded='',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n\n self._set_binner()\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n result = self._groupby_and_aggregate(arg,\n *args,\n **kwargs)\n\n result = self._apply_loffset(result)\n return result\n\n agg = aggregate\n apply = aggregate\n\n def transform(self, arg, *args, **kwargs):\n \"\"\"\n Call function producing a like-indexed Series on each group and return\n a Series with the transformed values\n\n Parameters\n ----------\n func : function\n To apply to each group. 
Should return a Series with the same index\n\n Examples\n --------\n >>> resampled.transform(lambda x: (x - x.mean()) / x.std())\n\n Returns\n -------\n transformed : Series\n \"\"\"\n return self._selected_obj.groupby(self.groupby).transform(\n arg, *args, **kwargs)\n\n def _downsample(self, f):\n raise AbstractMethodError(self)\n\n def _upsample(self, f, limit=None, fill_value=None):\n raise AbstractMethodError(self)\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n self._set_binner()\n grouper = self.grouper\n if subset is None:\n subset = self.obj\n grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis)\n\n # try the key selection\n try:\n return grouped[key]\n except KeyError:\n return grouped\n\n def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):\n \"\"\" re-evaluate the obj with a groupby aggregation \"\"\"\n\n if grouper is None:\n self._set_binner()\n grouper = self.grouper\n\n obj = self._selected_obj\n\n try:\n grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)\n except TypeError:\n\n # panel grouper\n grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)\n\n try:\n if isinstance(obj, ABCDataFrame) and compat.callable(how):\n # Check if the function is reducing or not.\n result = grouped._aggregate_item_by_item(how, *args, **kwargs)\n else:\n result = grouped.aggregate(how, *args, **kwargs)\n except Exception:\n\n # we have a non-reducing function\n # try to evaluate\n result = grouped.apply(how, *args, **kwargs)\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _apply_loffset(self, result):\n \"\"\"\n if loffset is set, offset the result index\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n \"\"\"\n\n needs_offset = (\n isinstance(self.loffset, (DateOffset, timedelta)) and\n isinstance(result.index, DatetimeIndex) and\n len(result.index) > 0\n )\n\n if needs_offset:\n result.index = result.index + self.loffset\n\n self.loffset = None\n return result\n\n def _get_resampler_for_grouping(self, groupby, **kwargs):\n \"\"\" return the correct class for resampling with groupby \"\"\"\n return self._resampler_for_grouping(self, groupby=groupby, **kwargs)\n\n def _wrap_result(self, result):\n \"\"\" potentially wrap any results \"\"\"\n if isinstance(result, ABCSeries) and self._selection is not None:\n result.name = self._selection\n\n if isinstance(result, ABCSeries) and result.empty:\n obj = self.obj\n result.index = obj.index._shallow_copy(freq=to_offset(self.freq))\n result.name = getattr(obj, 'name', None)\n\n return result\n\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n Returns\n -------\n an upsampled Series\n\n See Also\n --------\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._upsample('pad', limit=limit)\n ffill = pad\n\n def nearest(self, limit=None):\n \"\"\"\n Fill values with nearest neighbor starting from center\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n .. 
versionadded:: 0.21.0\n\n Returns\n -------\n an upsampled Series\n\n See Also\n --------\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._upsample('nearest', limit=limit)\n\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the new missing values in the resampled data.\n\n In statistics, imputation is the process of replacing missing data with\n substituted values [1]_. When resampling data, missing values may\n appear (e.g., when the resampling frequency is higher than the original\n frequency). The backward fill will replace NaN values that appeared in\n the resampled data with the next value in the original sequence.\n Missing values that existed in the original data will not be modified.\n\n Parameters\n ----------\n limit : integer, optional\n Limit of how many values to fill.\n\n Returns\n -------\n Series, DataFrame\n An upsampled Series or DataFrame with backward filled NaN values.\n\n See Also\n --------\n bfill : Alias of backfill.\n fillna : Fill NaN values using the specified method, which can be\n 'backfill'.\n nearest : Fill NaN values with nearest neighbor starting from center.\n pad : Forward fill NaN values.\n pandas.Series.fillna : Fill NaN values in the Series using the\n specified method, which can be 'backfill'.\n pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the\n specified method, which can be 'backfill'.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\n\n Examples\n --------\n\n Resampling a Series:\n\n >>> s = pd.Series([1, 2, 3],\n ... index=pd.date_range('20180101', periods=3, freq='h'))\n >>> s\n 2018-01-01 00:00:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 02:00:00 3\n Freq: H, dtype: int64\n\n >>> s.resample('30min').backfill()\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('15min').backfill(limit=2)\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:15:00 NaN\n 2018-01-01 00:30:00 2.0\n 2018-01-01 00:45:00 2.0\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:15:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 01:45:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 15T, dtype: float64\n\n Resampling a DataFrame that has missing values:\n\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\n ... index=pd.date_range('20180101', periods=3,\n ... freq='h'))\n >>> df\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('30min').backfill()\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 00:30:00 NaN 3\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 01:30:00 6.0 5\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('15min').backfill(limit=2)\n a b\n 2018-01-01 00:00:00 2.0 1.0\n 2018-01-01 00:15:00 NaN NaN\n 2018-01-01 00:30:00 NaN 3.0\n 2018-01-01 00:45:00 NaN 3.0\n 2018-01-01 01:00:00 NaN 3.0\n 2018-01-01 01:15:00 NaN NaN\n 2018-01-01 01:30:00 6.0 5.0\n 2018-01-01 01:45:00 6.0 5.0\n 2018-01-01 02:00:00 6.0 5.0\n \"\"\"\n return self._upsample('backfill', limit=limit)\n bfill = backfill\n\n def fillna(self, method, limit=None):\n \"\"\"\n Fill missing values introduced by upsampling.\n\n In statistics, imputation is the process of replacing missing data with\n substituted values [1]_. 
When resampling data, missing values may\n appear (e.g., when the resampling frequency is higher than the original\n frequency).\n\n Missing values that existed in the original data will\n not be modified.\n\n Parameters\n ----------\n method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}\n Method to use for filling holes in resampled data\n\n * 'pad' or 'ffill': use previous valid observation to fill gap\n (forward fill).\n * 'backfill' or 'bfill': use next valid observation to fill gap.\n * 'nearest': use nearest valid observation to fill gap.\n\n limit : integer, optional\n Limit of how many consecutive missing values to fill.\n\n Returns\n -------\n Series or DataFrame\n An upsampled Series or DataFrame with missing values filled.\n\n See Also\n --------\n backfill : Backward fill NaN values in the resampled data.\n pad : Forward fill NaN values in the resampled data.\n nearest : Fill NaN values in the resampled data\n with nearest neighbor starting from center.\n interpolate : Fill NaN values using interpolation.\n pandas.Series.fillna : Fill NaN values in the Series using the\n specified method, which can be 'bfill' and 'ffill'.\n pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the\n specified method, which can be 'bfill' and 'ffill'.\n\n Examples\n --------\n Resampling a Series:\n\n >>> s = pd.Series([1, 2, 3],\n ... index=pd.date_range('20180101', periods=3, freq='h'))\n >>> s\n 2018-01-01 00:00:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 02:00:00 3\n Freq: H, dtype: int64\n\n Without filling the missing values you get:\n\n >>> s.resample(\"30min\").asfreq()\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:30:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> s.resample('30min').fillna(\"backfill\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('15min').fillna(\"backfill\", limit=2)\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:15:00 NaN\n 2018-01-01 00:30:00 2.0\n 2018-01-01 00:45:00 2.0\n 2018-01-01 01:00:00 2.0\n 2018-01-01 01:15:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 01:45:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 15T, dtype: float64\n\n >>> s.resample('30min').fillna(\"pad\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 1\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 2\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n >>> s.resample('30min').fillna(\"nearest\")\n 2018-01-01 00:00:00 1\n 2018-01-01 00:30:00 2\n 2018-01-01 01:00:00 2\n 2018-01-01 01:30:00 3\n 2018-01-01 02:00:00 3\n Freq: 30T, dtype: int64\n\n Missing values present before the upsampling are not affected.\n\n >>> sm = pd.Series([1, None, 3],\n ... 
index=pd.date_range('20180101', periods=3, freq='h'))\n >>> sm\n 2018-01-01 00:00:00 1.0\n 2018-01-01 01:00:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: H, dtype: float64\n\n >>> sm.resample('30min').fillna('backfill')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> sm.resample('30min').fillna('pad')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 1.0\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 NaN\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n >>> sm.resample('30min').fillna('nearest')\n 2018-01-01 00:00:00 1.0\n 2018-01-01 00:30:00 NaN\n 2018-01-01 01:00:00 NaN\n 2018-01-01 01:30:00 3.0\n 2018-01-01 02:00:00 3.0\n Freq: 30T, dtype: float64\n\n DataFrame resampling is done column-wise. All the same options are\n available.\n\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\n ... index=pd.date_range('20180101', periods=3,\n ... freq='h'))\n >>> df\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 02:00:00 6.0 5\n\n >>> df.resample('30min').fillna(\"bfill\")\n a b\n 2018-01-01 00:00:00 2.0 1\n 2018-01-01 00:30:00 NaN 3\n 2018-01-01 01:00:00 NaN 3\n 2018-01-01 01:30:00 6.0 5\n 2018-01-01 02:00:00 6.0 5\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\n \"\"\"\n return self._upsample(method, limit=limit)\n\n @Appender(_shared_docs['interpolate'] % _shared_docs_kwargs)\n def interpolate(self, method='linear', axis=0, limit=None, inplace=False,\n limit_direction='forward', limit_area=None,\n downcast=None, **kwargs):\n \"\"\"\n Interpolate values according to different methods.\n\n .. versionadded:: 0.18.1\n \"\"\"\n result = self._upsample(None)\n return result.interpolate(method=method, axis=axis, limit=limit,\n inplace=inplace,\n limit_direction=limit_direction,\n limit_area=limit_area,\n downcast=downcast, **kwargs)\n\n def asfreq(self, fill_value=None):\n \"\"\"\n return the values at the new freq,\n essentially a reindex\n\n Parameters\n ----------\n fill_value: scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n .. versionadded:: 0.20.0\n\n See Also\n --------\n Series.asfreq\n DataFrame.asfreq\n \"\"\"\n return self._upsample('asfreq', fill_value=fill_value)\n\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_resampler_func('std', args, kwargs)\n return self._downsample('std', ddof=ddof)\n\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_resampler_func('var', args, kwargs)\n return self._downsample('var', ddof=ddof)\n\n @Appender(GroupBy.size.__doc__)\n def size(self):\n # It's a special case as higher level does return\n # a copy of 0-len objects. 
GH14962\n result = self._downsample('size')\n if not len(self.ax) and isinstance(self._selected_obj, ABCDataFrame):\n result = pd.Series([], index=result.index, dtype='int64')\n return result\n\n\n# downsample methods\nfor method in ['sum', 'prod']:\n\n def f(self, _method=method, min_count=0, *args, **kwargs):\n nv.validate_resampler_func(_method, args, kwargs)\n return self._downsample(_method, min_count=min_count)\n f.__doc__ = getattr(GroupBy, method).__doc__\n setattr(Resampler, method, f)\n\n\n# downsample methods\nfor method in ['min', 'max', 'first', 'last', 'mean', 'sem',\n 'median', 'ohlc']:\n\n def f(self, _method=method, *args, **kwargs):\n nv.validate_resampler_func(_method, args, kwargs)\n return self._downsample(_method)\n f.__doc__ = getattr(GroupBy, method).__doc__\n setattr(Resampler, method, f)\n\n# groupby & aggregate methods\nfor method in ['count']:\n def f(self, _method=method):\n return self._downsample(_method)\n f.__doc__ = getattr(GroupBy, method).__doc__\n setattr(Resampler, method, f)\n\n# series only methods\nfor method in ['nunique']:\n def f(self, _method=method):\n return self._downsample(_method)\n f.__doc__ = getattr(SeriesGroupBy, method).__doc__\n setattr(Resampler, method, f)\n\n\ndef _maybe_process_deprecations(r, how=None, fill_method=None, limit=None):\n \"\"\" potentially we might have a deprecation warning, show it\n but call the appropriate methods anyhow \"\"\"\n\n if how is not None:\n\n # .resample(..., how='sum')\n if isinstance(how, compat.string_types):\n method = \"{0}()\".format(how)\n\n # .resample(..., how=lambda x: ....)\n else:\n method = \".apply(<func>)\"\n\n # if we have both a how and fill_method, then show\n # the following warning\n if fill_method is None:\n warnings.warn(\"how in .resample() is deprecated\\n\"\n \"the new syntax is \"\n \".resample(...).{method}\".format(\n method=method),\n FutureWarning, stacklevel=3)\n r = r.aggregate(how)\n\n if fill_method is not None:\n\n # show the prior function call\n method = '.' 
+ method if how is not None else ''\n\n args = \"limit={0}\".format(limit) if limit is not None else \"\"\n warnings.warn(\"fill_method is deprecated to .resample()\\n\"\n \"the new syntax is .resample(...){method}\"\n \".{fill_method}({args})\".format(\n method=method,\n fill_method=fill_method,\n args=args),\n FutureWarning, stacklevel=3)\n\n if how is not None:\n r = getattr(r, fill_method)(limit=limit)\n else:\n r = r.aggregate(fill_method, limit=limit)\n\n return r\n\n\nclass _GroupByMixin(GroupByMixin):\n \"\"\" provide the groupby facilities \"\"\"\n\n def __init__(self, obj, *args, **kwargs):\n\n parent = kwargs.pop('parent', None)\n groupby = kwargs.pop('groupby', None)\n if parent is None:\n parent = obj\n\n # initialize our GroupByMixin object with\n # the resampler attributes\n for attr in self._attributes:\n setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))\n\n super(_GroupByMixin, self).__init__(None)\n self._groupby = groupby\n self._groupby.mutated = True\n self._groupby.grouper.mutated = True\n self.groupby = copy.copy(parent.groupby)\n\n def _apply(self, f, **kwargs):\n \"\"\"\n dispatch to _upsample; we are stripping all of the _upsample kwargs and\n performing the original function call on the grouped object\n \"\"\"\n\n def func(x):\n x = self._shallow_copy(x, groupby=self.groupby)\n\n if isinstance(f, compat.string_types):\n return getattr(x, f)(**kwargs)\n\n return x.apply(f, **kwargs)\n\n result = self._groupby.apply(func)\n return self._wrap_result(result)\n\n _upsample = _apply\n _downsample = _apply\n _groupby_and_aggregate = _apply\n\n\nclass DatetimeIndexResampler(Resampler):\n\n @property\n def _resampler_for_grouping(self):\n return DatetimeIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n\n # this is how we are actually creating the bins\n if self.kind == 'period':\n return self.groupby._get_time_period_bins(self.ax)\n return self.groupby._get_time_bins(self.ax)\n\n def _downsample(self, how, **kwargs):\n \"\"\"\n Downsample the cython defined function\n\n Parameters\n ----------\n how : string / cython mapped function\n **kwargs : kw args passed to how function\n \"\"\"\n self._set_binner()\n how = self._is_cython_func(how) or how\n ax = self.ax\n obj = self._selected_obj\n\n if not len(ax):\n # reset to the new freq\n obj = obj.copy()\n obj.index.freq = self.freq\n return obj\n\n # do we have a regular frequency\n if ax.freq is not None or ax.inferred_freq is not None:\n\n if len(self.grouper.binlabels) > len(ax) and how is None:\n\n # let's do an asfreq\n return self.asfreq()\n\n # we are downsampling\n # we want to call the actual grouper method here\n result = obj.groupby(\n self.grouper, axis=self.axis).aggregate(how, **kwargs)\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _adjust_binner_for_upsample(self, binner):\n \"\"\" adjust our binner when upsampling \"\"\"\n if self.closed == 'right':\n binner = binner[1:]\n else:\n binner = binner[:-1]\n return binner\n\n def _upsample(self, method, limit=None, fill_value=None):\n \"\"\"\n method : string {'backfill', 'bfill', 'pad',\n 'ffill', 'asfreq'} method for upsampling\n limit : int, default None\n Maximum size gap to fill when reindexing\n fill_value : scalar, default None\n Value to use for missing values\n\n See also\n --------\n .fillna\n\n \"\"\"\n self._set_binner()\n if self.axis:\n raise AssertionError('axis must be 0')\n if self._from_selection:\n raise ValueError(\"Upsampling from level= or on= selection\"\n \" is not supported, use 
.set_index(...)\"\n \" to explicitly set index to\"\n \" datetime-like\")\n\n ax = self.ax\n obj = self._selected_obj\n binner = self.binner\n res_index = self._adjust_binner_for_upsample(binner)\n\n # if we have the same frequency as our axis, then we are equal sampling\n if limit is None and to_offset(ax.inferred_freq) == self.freq:\n result = obj.copy()\n result.index = res_index\n else:\n result = obj.reindex(res_index, method=method,\n limit=limit, fill_value=fill_value)\n\n result = self._apply_loffset(result)\n return self._wrap_result(result)\n\n def _wrap_result(self, result):\n result = super(DatetimeIndexResampler, self)._wrap_result(result)\n\n # we may have a different kind that we were asked originally\n # convert if needed\n if self.kind == 'period' and not isinstance(result.index, PeriodIndex):\n result.index = result.index.to_period(self.freq)\n return result\n\n\nclass DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation\n\n .. versionadded:: 0.18.1\n\n \"\"\"\n @property\n def _constructor(self):\n return DatetimeIndexResampler\n\n\nclass PeriodIndexResampler(DatetimeIndexResampler):\n\n @property\n def _resampler_for_grouping(self):\n return PeriodIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n if self.kind == 'timestamp':\n return super(PeriodIndexResampler, self)._get_binner_for_time()\n return self.groupby._get_period_bins(self.ax)\n\n def _convert_obj(self, obj):\n obj = super(PeriodIndexResampler, self)._convert_obj(obj)\n\n if self._from_selection:\n # see GH 14008, GH 12871\n msg = (\"Resampling from level= or on= selection\"\n \" with a PeriodIndex is not currently supported,\"\n \" use .set_index(...) to explicitly set index\")\n raise NotImplementedError(msg)\n\n if self.loffset is not None:\n # Cannot apply loffset/timedelta to PeriodIndex -> convert to\n # timestamps\n self.kind = 'timestamp'\n\n # convert to timestamp\n if self.kind == 'timestamp':\n obj = obj.to_timestamp(how=self.convention)\n\n return obj\n\n def _downsample(self, how, **kwargs):\n \"\"\"\n Downsample the cython defined function\n\n Parameters\n ----------\n how : string / cython mapped function\n **kwargs : kw args passed to how function\n \"\"\"\n\n # we may need to actually resample as if we are timestamps\n if self.kind == 'timestamp':\n return super(PeriodIndexResampler, self)._downsample(how, **kwargs)\n\n how = self._is_cython_func(how) or how\n ax = self.ax\n\n if is_subperiod(ax.freq, self.freq):\n # Downsampling\n return self._groupby_and_aggregate(how, grouper=self.grouper)\n elif is_superperiod(ax.freq, self.freq):\n if how == 'ohlc':\n # GH #13083\n # upsampling to subperiods is handled as an asfreq, which works\n # for pure aggregating/reducing methods\n # OHLC reduces along the time dimension, but creates multiple\n # values for each period -> handle by _groupby_and_aggregate()\n return self._groupby_and_aggregate(how, grouper=self.grouper)\n return self.asfreq()\n elif ax.freq == self.freq:\n return self.asfreq()\n\n raise IncompatibleFrequency(\n 'Frequency {} cannot be resampled to {}, as they are not '\n 'sub or super periods'.format(ax.freq, self.freq))\n\n def _upsample(self, method, limit=None, fill_value=None):\n \"\"\"\n method : string {'backfill', 'bfill', 'pad', 'ffill'}\n method for upsampling\n limit : int, default None\n Maximum size gap to fill when reindexing\n fill_value : scalar, default None\n Value to use for missing values\n\n See also\n --------\n .fillna\n\n 
\"\"\"\n\n # we may need to actually resample as if we are timestamps\n if self.kind == 'timestamp':\n return super(PeriodIndexResampler, self)._upsample(\n method, limit=limit, fill_value=fill_value)\n\n self._set_binner()\n ax = self.ax\n obj = self.obj\n new_index = self.binner\n\n # Start vs. end of period\n memb = ax.asfreq(self.freq, how=self.convention)\n\n # Get the fill indexer\n indexer = memb.get_indexer(new_index, method=method, limit=limit)\n return self._wrap_result(_take_new_index(\n obj, indexer, new_index, axis=self.axis))\n\n\nclass PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation\n\n .. versionadded:: 0.18.1\n\n \"\"\"\n @property\n def _constructor(self):\n return PeriodIndexResampler\n\n\nclass TimedeltaIndexResampler(DatetimeIndexResampler):\n\n @property\n def _resampler_for_grouping(self):\n return TimedeltaIndexResamplerGroupby\n\n def _get_binner_for_time(self):\n return self.groupby._get_time_delta_bins(self.ax)\n\n def _adjust_binner_for_upsample(self, binner):\n \"\"\" adjust our binner when upsampling \"\"\"\n ax = self.ax\n\n if is_subperiod(ax.freq, self.freq):\n # We are actually downsampling\n # but are in the asfreq path\n # GH 12926\n if self.closed == 'right':\n binner = binner[1:]\n else:\n binner = binner[:-1]\n return binner\n\n\nclass TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):\n \"\"\"\n Provides a resample of a groupby implementation\n\n .. versionadded:: 0.18.1\n\n \"\"\"\n @property\n def _constructor(self):\n return TimedeltaIndexResampler\n\n\ndef resample(obj, kind=None, **kwds):\n \"\"\" create a TimeGrouper and return our resampler \"\"\"\n tg = TimeGrouper(**kwds)\n return tg._get_resampler(obj, kind=kind)\n\n\nresample.__doc__ = Resampler.__doc__\n\n\ndef get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,\n limit=None, kind=None, **kwargs):\n \"\"\" return our appropriate resampler when grouping as well \"\"\"\n\n # .resample uses 'on' similar to how .groupby uses 'key'\n kwargs['key'] = kwargs.pop('on', None)\n\n tg = TimeGrouper(freq=rule, **kwargs)\n resampler = tg._get_resampler(groupby.obj, kind=kind)\n r = resampler._get_resampler_for_grouping(groupby=groupby)\n return _maybe_process_deprecations(r,\n how=how,\n fill_method=fill_method,\n limit=limit)\n\n\nclass TimeGrouper(Grouper):\n \"\"\"\n Custom groupby class for time-interval grouping\n\n Parameters\n ----------\n freq : pandas date offset or offset alias for identifying bin edges\n closed : closed end of interval; 'left' or 'right'\n label : interval boundary to use for labeling; 'left' or 'right'\n convention : {'start', 'end', 'e', 's'}\n If axis is PeriodIndex\n \"\"\"\n _attributes = Grouper._attributes + ('closed', 'label', 'how',\n 'loffset', 'kind', 'convention',\n 'base')\n\n def __init__(self, freq='Min', closed=None, label=None, how='mean',\n axis=0, fill_method=None, limit=None, loffset=None,\n kind=None, convention=None, base=0, **kwargs):\n # Check for correctness of the keyword arguments which would\n # otherwise silently use the default if misspelled\n if label not in {None, 'left', 'right'}:\n raise ValueError('Unsupported value {} for `label`'.format(label))\n if closed not in {None, 'left', 'right'}:\n raise ValueError('Unsupported value {} for `closed`'.format(\n closed))\n if convention not in {None, 'start', 'end', 'e', 's'}:\n raise ValueError('Unsupported value {} for `convention`'\n .format(convention))\n\n freq = 
to_offset(freq)\n\n end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'}\n rule = freq.rule_code\n if (rule in end_types or\n ('-' in rule and rule[:rule.find('-')] in end_types)):\n if closed is None:\n closed = 'right'\n if label is None:\n label = 'right'\n else:\n if closed is None:\n closed = 'left'\n if label is None:\n label = 'left'\n\n self.closed = closed\n self.label = label\n self.kind = kind\n\n self.convention = convention or 'E'\n self.convention = self.convention.lower()\n\n if isinstance(loffset, compat.string_types):\n loffset = to_offset(loffset)\n self.loffset = loffset\n\n self.how = how\n self.fill_method = fill_method\n self.limit = limit\n self.base = base\n\n # always sort time groupers\n kwargs['sort'] = True\n\n super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)\n\n def _get_resampler(self, obj, kind=None):\n \"\"\"\n return my resampler or raise if we have an invalid axis\n\n Parameters\n ----------\n obj : input object\n kind : string, optional\n 'period','timestamp','timedelta' are valid\n\n Returns\n -------\n a Resampler\n\n Raises\n ------\n TypeError if incompatible axis\n\n \"\"\"\n self._set_grouper(obj)\n\n ax = self.ax\n if isinstance(ax, DatetimeIndex):\n return DatetimeIndexResampler(obj,\n groupby=self,\n kind=kind,\n axis=self.axis)\n elif isinstance(ax, PeriodIndex) or kind == 'period':\n return PeriodIndexResampler(obj,\n groupby=self,\n kind=kind,\n axis=self.axis)\n elif isinstance(ax, TimedeltaIndex):\n return TimedeltaIndexResampler(obj,\n groupby=self,\n axis=self.axis)\n\n raise TypeError(\"Only valid with DatetimeIndex, \"\n \"TimedeltaIndex or PeriodIndex, \"\n \"but got an instance of %r\" % type(ax).__name__)\n\n def _get_grouper(self, obj, validate=True):\n # create the resampler and return our binner\n r = self._get_resampler(obj)\n r._set_binner()\n return r.binner, r.grouper, r.obj\n\n def _get_time_bins(self, ax):\n if not isinstance(ax, DatetimeIndex):\n raise TypeError('axis must be a DatetimeIndex, but got '\n 'an instance of %r' % type(ax).__name__)\n\n if len(ax) == 0:\n binner = labels = DatetimeIndex(\n data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n first, last = ax.min(), ax.max()\n first, last = _get_range_edges(first, last, self.freq,\n closed=self.closed,\n base=self.base)\n tz = ax.tz\n # GH #12037\n # use first/last directly instead of call replace() on them\n # because replace() will swallow the nanosecond part\n # thus last bin maybe slightly before the end if the end contains\n # nanosecond part and lead to `Values falls after last bin` error\n binner = labels = DatetimeIndex(freq=self.freq,\n start=first,\n end=last,\n tz=tz,\n name=ax.name)\n\n # GH 15549\n # In edge case of tz-aware resapmling binner last index can be\n # less than the last variable in data object, this happens because of\n # DST time change\n if len(binner) > 1 and binner[-1] < last:\n extra_date_range = pd.date_range(binner[-1], last + self.freq,\n freq=self.freq, tz=tz,\n name=ax.name)\n binner = labels = binner.append(extra_date_range[1:])\n\n # a little hack\n trimmed = False\n if (len(binner) > 2 and binner[-2] == last and\n self.closed == 'right'):\n\n binner = binner[:-1]\n trimmed = True\n\n ax_values = ax.asi8\n binner, bin_edges = self._adjust_bin_edges(binner, ax_values)\n\n # general version, knowing nothing about relative frequencies\n bins = lib.generate_bins_dt64(\n ax_values, bin_edges, self.closed, hasnans=ax.hasnans)\n\n if self.closed == 'right':\n labels = binner\n if self.label == 
'right':\n labels = labels[1:]\n elif not trimmed:\n labels = labels[:-1]\n else:\n if self.label == 'right':\n labels = labels[1:]\n elif not trimmed:\n labels = labels[:-1]\n\n if ax.hasnans:\n binner = binner.insert(0, NaT)\n labels = labels.insert(0, NaT)\n\n # if we end up with more labels than bins\n # adjust the labels\n # GH4076\n if len(bins) < len(labels):\n labels = labels[:len(bins)]\n\n return binner, bins, labels\n\n def _adjust_bin_edges(self, binner, ax_values):\n # Some hacks for > daily data, see #1471, #1458, #1483\n\n bin_edges = binner.asi8\n\n if self.freq != 'D' and is_superperiod(self.freq, 'D'):\n day_nanos = delta_to_nanoseconds(timedelta(1))\n if self.closed == 'right':\n bin_edges = bin_edges + day_nanos - 1\n\n # intraday values on last day\n if bin_edges[-2] > ax_values.max():\n bin_edges = bin_edges[:-1]\n binner = binner[:-1]\n\n return binner, bin_edges\n\n def _get_time_delta_bins(self, ax):\n if not isinstance(ax, TimedeltaIndex):\n raise TypeError('axis must be a TimedeltaIndex, but got '\n 'an instance of %r' % type(ax).__name__)\n\n if not len(ax):\n binner = labels = TimedeltaIndex(\n data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n start = ax[0]\n end = ax[-1]\n labels = binner = TimedeltaIndex(start=start,\n end=end,\n freq=self.freq,\n name=ax.name)\n\n end_stamps = labels + 1\n bins = ax.searchsorted(end_stamps, side='left')\n\n # Addresses GH #10530\n if self.base > 0:\n labels += type(self.freq)(self.base)\n\n return binner, bins, labels\n\n def _get_time_period_bins(self, ax):\n if not isinstance(ax, DatetimeIndex):\n raise TypeError('axis must be a DatetimeIndex, but got '\n 'an instance of %r' % type(ax).__name__)\n\n if not len(ax):\n binner = labels = PeriodIndex(\n data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n labels = binner = PeriodIndex(start=ax[0],\n end=ax[-1],\n freq=self.freq,\n name=ax.name)\n\n end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()\n if ax.tzinfo:\n end_stamps = end_stamps.tz_localize(ax.tzinfo)\n bins = ax.searchsorted(end_stamps, side='left')\n\n return binner, bins, labels\n\n def _get_period_bins(self, ax):\n if not isinstance(ax, PeriodIndex):\n raise TypeError('axis must be a PeriodIndex, but got '\n 'an instance of %r' % type(ax).__name__)\n\n memb = ax.asfreq(self.freq, how=self.convention)\n\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\n nat_count = 0\n if memb.hasnans:\n nat_count = np.sum(memb._isnan)\n memb = memb[~memb._isnan]\n\n # if index contains no valid (non-NaT) values, return empty index\n if not len(memb):\n binner = labels = PeriodIndex(\n data=[], freq=self.freq, name=ax.name)\n return binner, [], labels\n\n start = ax.min().asfreq(self.freq, how=self.convention)\n end = ax.max().asfreq(self.freq, how='end')\n\n labels = binner = PeriodIndex(start=start, end=end,\n freq=self.freq, name=ax.name)\n\n i8 = memb.asi8\n freq_mult = self.freq.n\n\n # when upsampling to subperiods, we need to generate enough bins\n expected_bins_count = len(binner) * freq_mult\n i8_extend = expected_bins_count - (i8[-1] - i8[0])\n rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)\n rng += freq_mult\n bins = memb.searchsorted(rng, side='left')\n\n if nat_count > 0:\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\n # shift bins by the number of NaT\n bins += nat_count\n bins = np.insert(bins, 0, nat_count)\n binner = binner.insert(0, NaT)\n labels = labels.insert(0, NaT)\n\n return binner, bins, labels\n\n\ndef 
_take_new_index(obj, indexer, new_index, axis=0):\n from pandas.core.api import Series, DataFrame\n\n if isinstance(obj, Series):\n new_values = algos.take_1d(obj.values, indexer)\n return Series(new_values, index=new_index, name=obj.name)\n elif isinstance(obj, DataFrame):\n if axis == 1:\n raise NotImplementedError(\"axis 1 is not supported\")\n return DataFrame(obj._data.reindex_indexer(\n new_axis=new_index, indexer=indexer, axis=1))\n else:\n raise ValueError(\"'obj' should be either a Series or a DataFrame\")\n\n\ndef _get_range_edges(first, last, offset, closed='left', base=0):\n if isinstance(offset, compat.string_types):\n offset = to_offset(offset)\n\n if isinstance(offset, Tick):\n is_day = isinstance(offset, Day)\n day_nanos = delta_to_nanoseconds(timedelta(1))\n\n # #1165\n if (is_day and day_nanos % offset.nanos == 0) or not is_day:\n return _adjust_dates_anchored(first, last, offset,\n closed=closed, base=base)\n\n if not isinstance(offset, Tick): # and first.time() != last.time():\n # hack!\n first = first.normalize()\n last = last.normalize()\n\n if closed == 'left':\n first = Timestamp(offset.rollback(first))\n else:\n first = Timestamp(first - offset)\n\n last = Timestamp(last + offset)\n\n return first, last\n\n\ndef _adjust_dates_anchored(first, last, offset, closed='right', base=0):\n # First and last offsets should be calculated from the start day to fix an\n # error cause by resampling across multiple days when a one day period is\n # not a multiple of the frequency.\n #\n # See https://github.com/pandas-dev/pandas/issues/8683\n\n # 14682 - Since we need to drop the TZ information to perform\n # the adjustment in the presence of a DST change,\n # save TZ Info and the DST state of the first and last parameters\n # so that we can accurately rebuild them at the end.\n first_tzinfo = first.tzinfo\n last_tzinfo = last.tzinfo\n first_dst = bool(first.dst())\n last_dst = bool(last.dst())\n\n first = first.tz_localize(None)\n last = last.tz_localize(None)\n\n start_day_nanos = first.normalize().value\n\n base_nanos = (base % offset.n) * offset.nanos // offset.n\n start_day_nanos += base_nanos\n\n foffset = (first.value - start_day_nanos) % offset.nanos\n loffset = (last.value - start_day_nanos) % offset.nanos\n\n if closed == 'right':\n if foffset > 0:\n # roll back\n fresult = first.value - foffset\n else:\n fresult = first.value - offset.nanos\n\n if loffset > 0:\n # roll forward\n lresult = last.value + (offset.nanos - loffset)\n else:\n # already the end of the road\n lresult = last.value\n else: # closed == 'left'\n if foffset > 0:\n fresult = first.value - foffset\n else:\n # start of the road\n fresult = first.value\n\n if loffset > 0:\n # roll forward\n lresult = last.value + (offset.nanos - loffset)\n else:\n lresult = last.value + offset.nanos\n\n return (Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),\n Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst))\n\n\ndef asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):\n \"\"\"\n Utility frequency conversion method for Series/DataFrame\n \"\"\"\n if isinstance(obj.index, PeriodIndex):\n if method is not None:\n raise NotImplementedError(\"'method' argument is not supported\")\n\n if how is None:\n how = 'E'\n\n new_obj = obj.copy()\n new_obj.index = obj.index.asfreq(freq, how=how)\n\n elif len(obj.index) == 0:\n new_obj = obj.copy()\n new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))\n\n else:\n dti = date_range(obj.index[0], obj.index[-1], 
freq=freq)\n dti.name = obj.index.name\n new_obj = obj.reindex(dti, method=method, fill_value=fill_value)\n if normalize:\n new_obj.index = new_obj.index.normalize()\n\n return new_obj\n" ]
[ [ "pandas.tseries.frequencies.to_offset", "pandas.core.indexes.datetimes.DatetimeIndex", "pandas._libs.tslibs.Timestamp", "pandas.Series", "pandas._libs.lib.generate_bins_dt64", "pandas.core.indexes.datetimes.date_range", "pandas.core.indexes.period.PeriodIndex", "pandas.util._decorators.Substitution", "pandas.errors.AbstractMethodError", "numpy.arange", "pandas.compat.callable", "pandas.tseries.frequencies.is_superperiod", "pandas.core.indexes.timedeltas.TimedeltaIndex", "numpy.insert", "pandas.util._decorators.Appender", "pandas.core.groupby.generic.PanelGroupBy", "pandas.date_range", "pandas.core.api.Series", "numpy.sum", "pandas.tseries.frequencies.is_subperiod", "pandas.core.algorithms.take_1d", "pandas.compat.numpy.function.validate_resampler_func", "pandas.core.groupby.ops.BinGrouper" ] ]
mad-lab-fau/BioPsyKit
[ "8ed7a2949e9c03c7d67b9ac6d17948ae218d94c1" ]
[ "src/biopsykit/sleep/plotting.py" ]
[ "\"\"\"Module providing functions to plot data collected during sleep studies.\"\"\"\nimport datetime\nfrom typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticks\nimport pandas as pd\nimport seaborn as sns\n\nfrom biopsykit import colors\nfrom biopsykit.utils.datatype_helper import AccDataFrame, GyrDataFrame, ImuDataFrame, SleepEndpointDict\n\n_sleep_imu_plot_params = {\n \"background_color\": [\"#e0e0e0\", \"#9e9e9e\"],\n \"background_alpha\": [0.3, 0.3],\n}\n\n_bbox_default = dict(\n fc=(1, 1, 1, plt.rcParams[\"legend.framealpha\"]),\n ec=plt.rcParams[\"legend.edgecolor\"],\n boxstyle=\"round\",\n)\n\n\ndef sleep_imu_plot(\n data: Union[AccDataFrame, GyrDataFrame, ImuDataFrame],\n datastreams: Optional[Union[str, Sequence[str]]] = None,\n sleep_endpoints: Optional[SleepEndpointDict] = None,\n downsample_factor: Optional[int] = None,\n **kwargs,\n) -> Tuple[plt.Figure, Iterable[plt.Axes]]:\n \"\"\"Draw plot to visualize IMU data during sleep, and, optionally, add sleep endpoints information.\n\n Parameters\n ----------\n data : :class:`~pandas.DataFrame`\n data to plot. Data must either be acceleration data (:obj:`~biopsykit.utils.datatype_helper.AccDataFrame`),\n gyroscope data (:obj:`~biopsykit.utils.datatype_helper.GyrDataFrame`), or IMU data\n (:obj:`~biopsykit.utils.datatype_helper.ImuDataFrame`).\n datastreams : str or list of str, optional\n list of datastreams indicating which type of data should be plotted or ``None`` to only plot acceleration data.\n If more than one type of datastream is specified each datastream is plotted row-wise in its own subplot.\n Default: ``None``\n sleep_endpoints : :obj:`~biopsykit.utils.datatype_helper.SleepEndpointDict`\n dictionary with sleep endpoints to add to plot or ``None`` to only plot IMU data.\n downsample_factor : int, optional\n downsample factor to apply to raw input data before plotting or ``None`` to not downsample data before\n plotting (downsample factor 1). Default: ``None``\n **kwargs\n optional arguments for plot configuration.\n To configure which type of sleep endpoint annotations to plot:\n\n * ``plot_sleep_onset``: whether to plot sleep onset annotations or not: Default: ``True``\n * ``plot_wake_onset``: whether to plot wake onset annotations or not: Default: ``True``\n * ``plot_bed_start``: whether to plot bed interval start annotations or not: Default: ``True``\n * ``plot_bed_end``: whether to plot bed interval end annotations or not: Default: ``True``\n * ``plot_sleep_wake``: whether to plot vspans of detected sleep/wake phases or not: Default: ``True``\n\n To style general plot appearance:\n\n * ``axs``: pre-existing axes for the plot. Otherwise, a new figure and axes objects are created and\n returned.\n * ``figsize``: tuple specifying figure dimensions\n * ``palette``: color palette to plot different axes from input data\n\n To style axes:\n\n * ``xlabel``: label of x axis. Default: \"Time\"\n * ``ylabel``: label of y axis. Default: \"Acceleration :math:`[m/s^2]`\" for acceleration data and\n \"Angular Velocity :math:`[°/s]`\" for gyroscope data\n\n To style legend:\n\n * ``legend_loc``: location of legend. Default: \"lower left\"\n * ``legend_fontsize``: font size of legend labels. 
Default: \"smaller\"\n\n\n Returns\n -------\n fig : :class:`~matplotlib.figure.Figure`\n figure object\n axs : list of :class:`~matplotlib.axes.Axes`\n list of subplot axes objects\n\n \"\"\"\n axs: List[plt.Axes] = kwargs.pop(\"ax\", kwargs.pop(\"axs\", None))\n\n sns.set_palette(kwargs.get(\"palette\", colors.fau_palette_blue(3)))\n\n if datastreams is None:\n datastreams = [\"acc\"]\n if isinstance(datastreams, str):\n # ensure list\n datastreams = [datastreams]\n\n fig, axs = _sleep_imu_plot_get_fig_axs(axs, len(datastreams), **kwargs)\n\n downsample_factor = _sleep_imu_plot_get_downsample_factor(downsample_factor)\n\n if len(datastreams) != len(axs):\n raise ValueError(\n \"Number of datastreams to be plotted must match number of provided subplots! Expected {}, got {}.\".format(\n len(datastreams), len(axs)\n )\n )\n\n for ax, ds in zip(axs, datastreams):\n _sleep_imu_plot(\n data=data,\n datastream=ds,\n downsample_factor=downsample_factor,\n sleep_endpoints=sleep_endpoints,\n ax=ax,\n **kwargs,\n )\n\n fig.tight_layout()\n fig.autofmt_xdate(rotation=0, ha=\"center\")\n return fig, axs\n\n\ndef _sleep_imu_plot_get_fig_axs(axs: List[plt.Axes], nrows: int, **kwargs):\n figsize = kwargs.get(\"figsize\", None)\n\n if isinstance(axs, plt.Axes):\n # ensure list (if only one Axes object is passed to sleep_imu_plot() instead of a list of Axes objects)\n axs = [axs]\n if axs is None:\n fig, axs = plt.subplots(figsize=figsize, nrows=nrows)\n else:\n fig = axs[0].get_figure()\n if isinstance(axs, plt.Axes):\n # ensure list (if nrows == 1 only one axes object will be created, not a list of axes)\n axs = [axs]\n\n return fig, axs\n\n\ndef _sleep_imu_plot_get_downsample_factor(downsample_factor: int):\n if downsample_factor is None:\n downsample_factor = 1\n # ensure int\n downsample_factor = int(downsample_factor)\n if downsample_factor < 1:\n raise ValueError(\"'downsample_factor' must be >= 1!\")\n return downsample_factor\n\n\ndef _sleep_imu_plot(\n data: pd.DataFrame,\n datastream: str,\n downsample_factor: int,\n sleep_endpoints: SleepEndpointDict,\n ax: plt.Axes,\n **kwargs,\n):\n legend_loc = kwargs.get(\"legend_loc\", \"lower left\")\n legend_fontsize = kwargs.get(\"legend_fontsize\", \"smaller\")\n ylabel = kwargs.get(\"ylabel\", {\"acc\": \"Acceleration [$m/s^2$]\", \"gyr\": \"Angular Velocity [$°/s$]\"})\n xlabel = kwargs.get(\"xlabel\", \"Time\")\n\n if isinstance(data.index, pd.DatetimeIndex):\n plt.rcParams[\"timezone\"] = data.index.tz.zone\n\n data_plot = data.filter(like=datastream)[::downsample_factor]\n data_plot.plot(ax=ax)\n if sleep_endpoints is not None:\n kwargs.setdefault(\"ax\", ax)\n _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints=sleep_endpoints, **kwargs)\n\n if isinstance(data_plot.index, pd.DatetimeIndex):\n # TODO add axis style for non-Datetime axes\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M:%S\"))\n ax.xaxis.set_minor_locator(mticks.AutoMinorLocator(6))\n\n ax.set_ylabel(ylabel[datastream])\n ax.set_xlabel(xlabel)\n ax.legend(loc=legend_loc, fontsize=legend_fontsize, framealpha=1.0)\n\n\ndef _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints: SleepEndpointDict, **kwargs):\n bed_start = pd.to_datetime(sleep_endpoints[\"bed_interval_start\"])\n bed_end = pd.to_datetime(sleep_endpoints[\"bed_interval_end\"])\n sleep_onset = pd.to_datetime(sleep_endpoints[\"sleep_onset\"])\n wake_onset = pd.to_datetime(sleep_endpoints[\"wake_onset\"])\n\n ax = kwargs.pop(\"ax\")\n\n if isinstance(sleep_endpoints, dict):\n sleep_bouts = 
sleep_endpoints[\"sleep_bouts\"]\n wake_bouts = sleep_endpoints[\"wake_bouts\"]\n date = sleep_endpoints[\"date\"]\n else:\n sleep_bouts = pd.DataFrame(sleep_endpoints[\"sleep_bouts\"][0])\n wake_bouts = pd.DataFrame(sleep_endpoints[\"wake_bouts\"][0])\n date = sleep_endpoints.index[0][1]\n\n date = pd.to_datetime(date)\n\n # 00:00 (12 am) vline (if present)\n if date == bed_start.normalize():\n ax.vlines(\n [date + pd.Timedelta(\"1d\")],\n 0,\n 1,\n transform=ax.get_xaxis_transform(),\n linewidths=3,\n linestyles=\"dotted\",\n colors=colors.fau_color(\"tech\"),\n zorder=0,\n )\n\n _sleep_imu_plot_add_annotations(sleep_onset, wake_onset, bed_start, bed_end, sleep_bouts, wake_bouts, ax, **kwargs)\n\n # wear_time['end'] = wear_time.index.shift(1, freq=pd.Timedelta(\"15M\"))\n # wear_time = wear_time[wear_time['wear'] == 0.0]\n # wear_time = wear_time.reset_index()\n #\n # handle = None\n # for idx, row in wear_time.iterrows():\n # handle = ax.axvspan(row['index'], row['end'], color=colors.fau_color('wiso'), alpha=0.5, lw=0)\n # if handle is not None:\n # handles['non-wear'] = handle\n\n ax.set_title(\"Sleep IMU Data: {} – {}\".format(date.date(), (date + pd.Timedelta(\"1d\")).date()))\n\n\ndef _sleep_imu_plot_add_annotations(\n sleep_onset: datetime.datetime,\n wake_onset: datetime.datetime,\n bed_start: datetime.datetime,\n bed_end: datetime.datetime,\n sleep_bouts,\n wake_bouts,\n ax: plt.Axes,\n **kwargs,\n):\n legend_loc = \"lower right\"\n legend_fontsize = kwargs.get(\"legend_fontsize\", \"smaller\")\n\n plot_sleep_onset = kwargs.get(\"plot_sleep_onset\", True)\n plot_wake_onset = kwargs.get(\"plot_wake_onset\", True)\n plot_bed_start = kwargs.get(\"plot_bed_start\", True)\n plot_bed_end = kwargs.get(\"plot_bed_end\", True)\n plot_sleep_wake = kwargs.get(\"plot_sleep_wake\", True)\n\n if plot_sleep_onset:\n _sleep_imu_plot_add_sleep_onset(sleep_onset, ax, **kwargs)\n if plot_wake_onset:\n _sleep_imu_plot_add_wake_onset(wake_onset, ax, **kwargs)\n if plot_bed_start:\n _sleep_imu_plot_add_bed_start(sleep_onset, bed_start, ax, **kwargs)\n if plot_bed_end:\n _sleep_imu_plot_add_bed_end(wake_onset, bed_end, ax, **kwargs)\n if plot_sleep_wake:\n handles = _sleep_imu_plot_add_sleep_wake_bouts(sleep_bouts, wake_bouts, ax, **kwargs)\n legend = ax.legend(\n handles=list(handles.values()),\n labels=list(handles.keys()),\n loc=legend_loc,\n fontsize=legend_fontsize,\n framealpha=1.0,\n )\n ax.add_artist(legend)\n\n\ndef _sleep_imu_plot_add_sleep_onset(sleep_onset, ax: plt.Axes, **kwargs):\n bbox = kwargs.get(\"bbox\", _bbox_default)\n\n # Sleep Onset vline\n ax.vlines(\n [sleep_onset],\n 0,\n 1,\n transform=ax.get_xaxis_transform(),\n linewidth=3,\n linestyles=\"--\",\n colors=colors.fau_color(\"nat\"),\n zorder=3,\n )\n\n # Sleep Onset Text + Arrow\n ax.annotate(\n \"Sleep Onset\",\n xy=(mdates.date2num(sleep_onset), 0.90),\n xycoords=ax.get_xaxis_transform(),\n xytext=(mdates.date2num(sleep_onset + pd.Timedelta(\"20min\")), 0.90),\n textcoords=ax.get_xaxis_transform(),\n ha=\"left\",\n va=\"center\",\n bbox=bbox,\n arrowprops=dict(\n arrowstyle=\"->\",\n lw=2,\n color=colors.fau_color(\"nat\"),\n shrinkA=0.0,\n shrinkB=0.0,\n ),\n )\n\n\ndef _sleep_imu_plot_add_wake_onset(wake_onset, ax: plt.Axes, **kwargs):\n bbox = kwargs.get(\"bbox\", _bbox_default)\n # Wake Onset vline\n ax.vlines(\n [wake_onset],\n 0,\n 1,\n transform=ax.get_xaxis_transform(),\n linewidth=3,\n linestyles=\"--\",\n colors=colors.fau_color(\"nat\"),\n zorder=3,\n )\n\n # Wake Onset Text + Arrow\n ax.annotate(\n \"Wake 
Onset\",\n xy=(mdates.date2num(wake_onset), 0.90),\n xycoords=ax.get_xaxis_transform(),\n xytext=(mdates.date2num(wake_onset - pd.Timedelta(\"20min\")), 0.90),\n textcoords=ax.get_xaxis_transform(),\n ha=\"right\",\n va=\"center\",\n bbox=bbox,\n arrowprops=dict(\n arrowstyle=\"->\",\n lw=2,\n color=colors.fau_color(\"nat\"),\n shrinkA=0.0,\n shrinkB=0.0,\n ),\n )\n\n\ndef _sleep_imu_plot_add_bed_start(sleep_onset, bed_start, ax: plt.Axes, **kwargs):\n bbox = kwargs.get(\"bbox\", _bbox_default)\n\n # Bed Start vline\n ax.vlines(\n [bed_start],\n 0,\n 1,\n transform=ax.get_xaxis_transform(),\n linewidth=3,\n linestyles=\"--\",\n colors=colors.fau_color(\"med\"),\n zorder=3,\n )\n # Bed Start Text + Arrow\n ax.annotate(\n \"Bed Interval Start\",\n xy=(mdates.date2num(bed_start), 0.80),\n xycoords=ax.get_xaxis_transform(),\n xytext=(mdates.date2num(sleep_onset + pd.Timedelta(\"20min\")), 0.80),\n textcoords=ax.get_xaxis_transform(),\n ha=\"left\",\n va=\"center\",\n bbox=bbox,\n arrowprops=dict(\n arrowstyle=\"->\",\n lw=2,\n color=colors.fau_color(\"med\"),\n shrinkA=0.0,\n shrinkB=0.0,\n ),\n )\n\n\ndef _sleep_imu_plot_add_bed_end(wake_onset, bed_end, ax: plt.Axes, **kwargs):\n bbox = kwargs.get(\"bbox\", _bbox_default)\n\n # Bed End vline\n ax.vlines(\n [bed_end],\n 0,\n 1,\n transform=ax.get_xaxis_transform(),\n linewidth=3,\n linestyles=\"--\",\n colors=colors.fau_color(\"med\"),\n zorder=3,\n )\n # Bed End Text + Arrow\n ax.annotate(\n \"Bed Interval End\",\n xy=(mdates.date2num(bed_end), 0.80),\n xycoords=ax.get_xaxis_transform(),\n xytext=(mdates.date2num(wake_onset - pd.Timedelta(\"20min\")), 0.80),\n textcoords=ax.get_xaxis_transform(),\n ha=\"right\",\n va=\"center\",\n bbox=bbox,\n arrowprops=dict(\n arrowstyle=\"->\",\n lw=2,\n color=colors.fau_color(\"med\"),\n shrinkA=0.0,\n shrinkB=0.0,\n ),\n )\n\n\ndef _sleep_imu_plot_add_sleep_wake_bouts(\n sleep_bouts: pd.DataFrame, wake_bouts: pd.DataFrame, ax: plt.Axes, **kwargs\n) -> Dict[str, plt.Artist]:\n handles = {}\n for (bout_name, bouts), bg_color, bg_alpha in zip(\n {\"sleep\": sleep_bouts, \"wake\": wake_bouts}.items(),\n kwargs.get(\"background_color\", _sleep_imu_plot_params[\"background_color\"]),\n kwargs.get(\"background_alpha\", _sleep_imu_plot_params[\"background_alpha\"]),\n ):\n handle = None\n for _, bout in bouts.iterrows():\n handle = ax.axvspan(bout[\"start\"], bout[\"end\"], color=bg_color, alpha=bg_alpha)\n\n handles[bout_name] = handle\n\n handles = {k: v for k, v in handles.items() if v is not None}\n return handles\n" ]
[ [ "matplotlib.dates.DateFormatter", "pandas.to_datetime", "matplotlib.ticker.AutoMinorLocator", "matplotlib.pyplot.subplots", "pandas.DataFrame", "pandas.Timedelta", "matplotlib.dates.date2num" ] ]
BLSQ/openhexa-pipelines
[ "54e1afb575d03b0c458492325036d4a994fa2c90" ]
[ "dhis2-extraction/tests/test_dhis2extract.py" ]
[ "import json\nimport os\nimport re\nimport tempfile\nfrom io import StringIO\n\nimport dhis2extract\nimport pandas as pd\nimport pytest\nimport responses\nfrom click.testing import CliRunner\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\nfrom s3fs import S3FileSystem\n\nDHIS_INSTANCE = \"play.dhis2.org/2.34.7\"\n\n\ndef test_filesystem():\n assert isinstance(dhis2extract.filesystem(\"/tmp/file.txt\"), LocalFileSystem)\n assert isinstance(dhis2extract.filesystem(\"http://example.com/\"), HTTPFileSystem)\n assert isinstance(dhis2extract.filesystem(\"s3://bucket/dir\"), S3FileSystem)\n # assert isinstance(dhis2extract.filesystem(\"gcs://bucket/dir\"), GCSFileSystem)\n with pytest.raises(ValueError):\n dhis2extract.filesystem(\"bad://bucket/dir\")\n\n\[email protected](scope=\"module\")\[email protected]\ndef demo():\n \"\"\"DHIS2 demo instance APIs.\"\"\"\n\n # mock api/metadata calls before creating the DHIS2 object\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n for fname in os.listdir(os.path.join(responses_dir, \"metadata\")):\n name = fname.split(\".\")[0]\n with open(os.path.join(responses_dir, \"metadata\", fname)) as f:\n responses.add(\n responses.GET,\n url=re.compile(f\".+metadata.+{name}=True.+\"),\n body=f.read(),\n status=200,\n )\n\n return dhis2extract.DHIS2(DHIS_INSTANCE, \"\", \"\")\n\n\ndef test_dhis2_get_metadata(demo):\n assert len(demo.metadata) == 11\n assert len(demo.metadata.get(\"organisationUnits\")) == 7860\n\n\ndef test_data_element_dataset(demo):\n assert demo.data_element_dataset(\"l6byfWFUGaP\") == \"BfMAe6Itzgt\"\n assert demo.data_element_dataset(\"Bad UID\") is None\n\n\[email protected](\"level,expected\", [(0, 0), (3, 267), (4, 2897)])\ndef test_org_units_per_lvl(demo, level, expected):\n assert len(demo.org_units_per_lvl(level)) == expected\n\n\[email protected]\ndef test_data_value_sets_01(demo):\n \"\"\"With start and end dates arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"dataValueSets\", \"response01.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/dataValueSets.csv.+startDate=.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.data_value_sets(\n start_date=\"2020-01-01\",\n end_date=\"2020-03-01\",\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 302\n\n\[email protected]\ndef test_data_value_sets_02(demo):\n \"\"\"With periods arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"dataValueSets\", \"response02.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/dataValueSets.csv.+period=202004.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.data_value_sets(\n periods=[\"202004\", \"202006\"],\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 325\n\n\[email protected]\ndef test_data_value_sets_03(demo):\n \"\"\"With datasets arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"dataValueSets\", \"response03.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/dataValueSets.csv.+period=202008.+orgUnit=VdXuxcNkiad\"),\n body=f.read(),\n 
status=200,\n )\n csv = demo.data_value_sets(\n periods=[\"202008\", \"202010\"],\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n datasets=[\"BfMAe6Itzgt\", \"QX4ZTUbOt3a\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 196\n\n\[email protected]\ndef test_data_value_sets_04(demo, raw_metadata):\n \"\"\"With loads of org units - requests should be chunked\"\"\"\n\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"dataValueSets\", \"response04.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/dataValueSets.csv.+period=202008.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.data_value_sets(\n periods=[\"202008\", \"202010\"],\n org_units=[ou[\"id\"] for ou in raw_metadata[\"organisationUnits\"][:113]],\n datasets=[\"BfMAe6Itzgt\", \"QX4ZTUbOt3a\"],\n )\n df = pd.read_csv(StringIO(csv))\n\n # 113 org units, chunk size=50, we should have 3 requests (they share the same response with 196 lines)\n assert len(df) == 196 * 3\n\n\[email protected]\ndef test_data_value_sets_05(demo):\n \"\"\"With levels arguments - requests should be chunked.\"\"\"\n\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"dataValueSets\", \"response04.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/dataValueSets.csv.+children=True.+period=202008.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.data_value_sets(\n periods=[\"202008\", \"202010\"],\n org_unit_levels=[4],\n datasets=[\"BfMAe6Itzgt\", \"QX4ZTUbOt3a\"],\n )\n df = pd.read_csv(StringIO(csv))\n\n # 267 org units for level 4, chunk size=50, we should have 6 requests (they share the same response with 196 lines)\n assert len(df) == 196 * 6\n\n\[email protected]\ndef test_analytics_01(demo):\n \"\"\"With start and end dates arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"analytics\", \"response01.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/analytics.csv.+pe%3A202001%3B202002%3B202003.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.analytics(\n start_date=\"202001\",\n end_date=\"202003\",\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 25\n\n\[email protected]\ndef test_analytics_02(demo):\n \"\"\"With periods arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"analytics\", \"response02.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/analytics.csv.+pe%3A202004%3B202006.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.analytics(\n periods=[\"202004\", \"202006\"],\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 21\n\n\[email protected]\ndef test_analytics_raw_data_01(demo):\n \"\"\"With start and end dates arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"analyticsRawData\", \"response01.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/analytics/rawData.csv.+startDate=2020-01-01.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.analytics_raw_data(\n start_date=\"2020-01-01\",\n end_date=\"2020-03-01\",\n org_units=[\"VdXuxcNkiad\", 
\"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 17\n\n\[email protected]\ndef test_analytics_raw_data_02(demo):\n \"\"\"With periods arguments.\"\"\"\n responses_dir = os.path.join(os.path.dirname(__file__), \"responses\")\n with open(os.path.join(responses_dir, \"analyticsRawData\", \"response02.csv\")) as f:\n responses.add(\n responses.GET,\n url=re.compile(\".+/analytics/rawData.csv.+pe%3A202004%3B202006.+\"),\n body=f.read(),\n status=200,\n )\n csv = demo.analytics_raw_data(\n periods=[\"202004\", \"202006\"],\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n )\n df = pd.read_csv(StringIO(csv))\n assert len(df) == 32\n\n\ndef test_dimension_param():\n\n param = dhis2extract._dimension_param(\n org_units=[\"VdXuxcNkiad\", \"BNFrspDBKel\"],\n org_unit_groups=[\"tDZVQ1WtwpA\", \"MAs88nJc9nL\"],\n org_unit_levels=[4, 5],\n data_elements=[\"l6byfWFUGaP\", \"Boy3QwztgeZ\"],\n indicators=[\"Uvn6LCg7dVU\", \"aGByu8NFs9m\"],\n programs=[\"bMcwwoVnbSR\"],\n periods=[\"202001\", \"202002\", \"202004\"],\n )\n\n # periods\n assert \"pe:202001;202002;202004\" in param\n # org units\n assert \"ou:VdXuxcNkiad;BNFrspDBKel\" in param\n # org unit groups\n assert \"ou:OU_GROUP-tDZVQ1WtwpA;OU_GROUP-MAs88nJc9nL\" in param\n # org unit levels\n assert \"ou:LEVEL-4;LEVEL-5\" in param\n # data elements\n assert \"dx:l6byfWFUGaP;Boy3QwztgeZ\" in param\n # indicators\n assert \"dx:Uvn6LCg7dVU;aGByu8NFs9m\" in param\n # programs\n assert \"dx:bMcwwoVnbSR\" in param\n\n\ndef test_check_iso_date():\n\n assert not dhis2extract._check_iso_date(\"202001\")\n assert dhis2extract._check_iso_date(\"2020-01-01\")\n\n\ndef test_check_dhis2_period():\n\n assert not dhis2extract._check_dhis2_period(\"2020-01-01\")\n assert dhis2extract._check_dhis2_period(\"2020Q1\")\n assert dhis2extract._check_dhis2_period(\"2020\")\n assert dhis2extract._check_dhis2_period(\"202001\")\n\n\[email protected](scope=\"module\")\ndef raw_metadata():\n fpath = os.path.join(os.path.dirname(__file__), \"data\", \"metadata.json\")\n with open(fpath) as f:\n return json.load(f)\n\n\ndef test_transform_org_units(raw_metadata):\n df = dhis2extract._transform_org_units(raw_metadata)\n assert len(df) > 10\n assert \"Rp268JB6Ne4\" in df.ou_uid.unique()\n for column in [\"ou_uid\", \"ou_name\", \"path\"]:\n assert column in df.columns\n\n\ndef test_transform_org_units_geo(raw_metadata):\n fpath = os.path.join(os.path.dirname(__file__), \"data\", \"org_units.csv\")\n df = pd.read_csv(fpath)\n geodf = dhis2extract._transform_org_units_geo(df)\n assert len(geodf) > 10\n assert \"Rp268JB6Ne4\" in df.ou_uid.unique()\n geodf = geodf[pd.notna(geodf.geometry)]\n assert len(geodf) > 10\n\n\ndef test_transform_org_unit_groups(raw_metadata):\n df = dhis2extract._transform_org_unit_groups(raw_metadata)\n assert len(df) > 10\n assert \"CXw2yu5fodb\" in df.oug_uid.unique()\n for column in [\"oug_uid\", \"oug_name\"]:\n assert column in df.columns\n\n\ndef test_transform_data_elements(raw_metadata):\n df = dhis2extract._transform_data_elements(raw_metadata)\n assert len(df) > 10\n assert \"FTRrcoaog83\" in df.dx_uid.unique()\n for column in [\"dx_uid\", \"dx_name\"]:\n assert column in df.columns\n\n\ndef test_transform_indicators(raw_metadata):\n df = dhis2extract._transform_indicators(raw_metadata)\n assert len(df) > 10\n assert \"ReUHfIn0pTQ\" in df.dx_uid.unique()\n for column in [\"dx_uid\", \"dx_name\"]:\n assert column in 
df.columns\n\n\ndef test_transform_indicator_groups(raw_metadata):\n df = dhis2extract._transform_indicator_groups(raw_metadata)\n assert len(df) > 10\n assert \"oehv9EO3vP7\" in df.ing_uid.unique()\n for column in [\"ing_uid\", \"ing_name\"]:\n assert column in df.columns\n\n\ndef test_transform_datasets(raw_metadata):\n df = dhis2extract._transform_datasets(raw_metadata)\n assert len(df) > 10\n assert \"lyLU2wR22tC\" in df.ds_uid.unique()\n for column in [\"ds_uid\", \"ds_name\", \"data_elements\", \"org_units\"]:\n assert column in df.columns\n\n\ndef test_transform_programs(raw_metadata):\n df = dhis2extract._transform_programs(raw_metadata)\n assert len(df) > 10\n assert \"lxAQ7Zs9VYR\" in df.program_uid.unique()\n for column in [\"program_uid\", \"program_name\"]:\n assert column in df.columns\n\n\ndef test_transform_cat_combos(raw_metadata):\n df = dhis2extract._transform_cat_combos(raw_metadata)\n assert len(df) > 10\n assert \"m2jTvAj5kkm\" in df.cc_uid.unique()\n for column in [\"cc_uid\", \"cc_name\"]:\n assert column in df.columns\n\n\ndef test_transform_cat_options(raw_metadata):\n df = dhis2extract._transform_cat_options(raw_metadata)\n assert len(df) > 10\n assert \"FbLZS3ueWbQ\" in df.co_uid.unique()\n for column in [\"co_uid\", \"co_name\"]:\n assert column in df.columns\n\n\ndef test_transform_categories(raw_metadata):\n df = dhis2extract._transform_categories(raw_metadata)\n assert len(df) > 10\n assert \"KfdsGBcoiCa\" in df.cat_uid.unique()\n for column in [\"cat_uid\", \"cat_name\"]:\n assert column in df.columns\n\n\ndef test_transform_category_option_combos(raw_metadata):\n df = dhis2extract._transform_category_option_combos(raw_metadata)\n assert len(df) > 10\n assert \"S34ULMcHMca\" in df.coc_uid.unique()\n for column in [\"coc_uid\", \"coc_name\"]:\n assert column in df.columns\n\n\ndef test_transform_analytics(raw_metadata):\n fpath = os.path.join(\n os.path.dirname(__file__), \"responses\", \"analytics\", \"response01.csv\"\n )\n df = dhis2extract._transform_analytics(pd.read_csv(fpath))\n assert len(df) == 25\n assert \"Boy3QwztgeZ\" in df.dx_uid.unique()\n assert \"rQLFnNXXIL0\" in df.coc_uid.unique()\n assert \"VdXuxcNkiad\" in df.ou_uid.unique()\n for column in [\"dx_uid\", \"coc_uid\", \"period\", \"ou_uid\", \"value\"]:\n assert column in df.columns\n\n\ndef test_transform_data_value_sets(raw_metadata):\n fpath = os.path.join(\n os.path.dirname(__file__), \"responses\", \"dataValueSets\", \"response01.csv\"\n )\n df = dhis2extract._transform_data_value_sets(pd.read_csv(fpath))\n assert len(df) == 302\n assert \"dY4OCwl0Y7Y\" in df.dx_uid.unique()\n assert \"J2Qf1jtZuj8\" in df.coc_uid.unique()\n assert \"VdXuxcNkiad\" in df.ou_uid.unique()\n for column in [\"dx_uid\", \"coc_uid\", \"period\", \"ou_uid\", \"value\"]:\n assert column in df.columns\n\n\ndef test_transform_analytics_raw_data(raw_metadata):\n fpath = os.path.join(\n os.path.dirname(__file__), \"responses\", \"analyticsRawData\", \"response01.csv\"\n )\n df = dhis2extract._transform_analytics_raw_data(pd.read_csv(fpath))\n assert len(df) == 17\n assert \"l6byfWFUGaP\" in df.dx_uid.unique()\n assert \"Prlt0C1RF0s\" in df.coc_uid.unique()\n assert \"BNFrspDBKel\" in df.ou_uid.unique()\n for column in [\"dx_uid\", \"coc_uid\", \"period\", \"ou_uid\", \"value\"]:\n assert column in df.columns\n\n\[email protected](scope=\"module\")\ndef data_elements():\n \"\"\"Data elements metadata.\"\"\"\n return pd.read_csv(\n os.path.join(os.path.dirname(__file__), \"data\", \"data_elements.csv\"),\n 
index_col=0,\n )\n\n\[email protected](scope=\"module\")\ndef indicators():\n \"\"\"Indicators metadata.\"\"\"\n return pd.read_csv(\n os.path.join(os.path.dirname(__file__), \"data\", \"indicators.csv\"), index_col=0\n )\n\n\[email protected](scope=\"module\")\ndef category_option_combos():\n \"\"\"COC metadata.\"\"\"\n return pd.read_csv(\n os.path.join(os.path.dirname(__file__), \"data\", \"category_option_combos.csv\"),\n index_col=0,\n )\n\n\[email protected](scope=\"module\")\ndef organisation_units():\n \"\"\"Org units metadata.\"\"\"\n return pd.read_csv(\n os.path.join(os.path.dirname(__file__), \"data\", \"organisation_units.csv\"),\n index_col=0,\n )\n\n\[email protected](\n \"dx_uid,expected\", [(\"ReUHfIn0pTQ\", \"Indicator\"), (\"FTRrcoaog83\", \"Data Element\")]\n)\ndef test_dx_type(dx_uid, expected, data_elements, indicators):\n assert dhis2extract._dx_type(dx_uid, data_elements, indicators) == expected\n\n\[email protected](\n \"dx_uid,expected\",\n [(\"A21lT9x7pmc\", \"Cabin fever\"), (\"aGByu8NFs9m\", \"Well nourished rate\")],\n)\ndef test_dx_name(dx_uid, expected, data_elements, indicators):\n assert dhis2extract._dx_name(dx_uid, data_elements, indicators) == expected\n\n\[email protected](\n \"level,expected\", [(1, \"ImspTQPwCqd\"), (3, \"qtr8GGlm4gg\"), (5, None)]\n)\ndef test_level_uid(level, expected):\n assert (\n dhis2extract._level_uid(\n \"/ImspTQPwCqd/at6UHUQatSo/qtr8GGlm4gg/Rp268JB6Ne4\", level\n )\n == expected\n )\n\n\ndef test_join_metadata(\n data_elements, indicators, category_option_combos, organisation_units\n):\n extract = pd.read_csv(\n os.path.join(os.path.dirname(__file__), \"data\", \"extract.csv\")\n )\n merge = dhis2extract._join_from_metadata(\n extract, data_elements, indicators, category_option_combos, organisation_units\n )\n\n for column in [\"dx_name\", \"dx_type\", \"coc_name\", \"level_1_uid\", \"level_1_name\"]:\n assert column in merge.columns\n\n assert len(merge) == len(extract)\n assert merge.isna().values.sum() == 0\n assert (merge.level_1_name == \"Sierra Leone\").all()\n\n\[email protected](reason=\"responses not mocked\")\ndef test_download_data_value_sets():\n runner = CliRunner()\n with tempfile.TemporaryDirectory() as tmp_dir:\n result = runner.invoke(\n dhis2extract.cli,\n [\n \"download\",\n \"--instance\",\n DHIS_INSTANCE,\n \"--username\",\n \"\",\n \"--password\",\n \"\",\n \"--output-dir\",\n tmp_dir,\n \"--start\",\n \"2020-01-01\",\n \"--end\",\n \"2020-03-01\",\n \"-ou\",\n \"VdXuxcNkiad\",\n \"-ou\",\n \"BNFrspDBKel\",\n \"-de\",\n \"l6byfWFUGaP\",\n \"-de\",\n \"Boy3QwztgeZ\",\n \"--no-aggregate\",\n \"--no-analytics\",\n ],\n )\n assert result.exit_code == 0\n assert \"data_value_sets.csv\" in os.listdir(tmp_dir)\n assert \"metadata.json\" in os.listdir(tmp_dir)\n\n\[email protected](reason=\"responses not mocked\")\ndef test_download_analytics():\n runner = CliRunner()\n with tempfile.TemporaryDirectory() as tmp_dir:\n result = runner.invoke(\n dhis2extract.cli,\n [\n \"download\",\n \"--instance\",\n DHIS_INSTANCE,\n \"--username\",\n \"\",\n \"--password\",\n \"\",\n \"--output-dir\",\n tmp_dir,\n \"--start\",\n \"202001\",\n \"--end\",\n \"202003\",\n \"-ou\",\n \"VdXuxcNkiad\",\n \"-ou\",\n \"BNFrspDBKel\",\n \"-de\",\n \"l6byfWFUGaP\",\n \"-de\",\n \"Boy3QwztgeZ\",\n \"--aggregate\",\n \"--analytics\",\n ],\n )\n assert result.exit_code == 0\n assert \"analytics.csv\" in os.listdir(tmp_dir)\n assert \"metadata.json\" in os.listdir(tmp_dir)\n\n\[email protected](reason=\"responses not mocked\")\ndef 
test_download_analytics_raw_data():\n runner = CliRunner()\n with tempfile.TemporaryDirectory() as tmp_dir:\n result = runner.invoke(\n dhis2extract.cli,\n [\n \"download\",\n \"--instance\",\n DHIS_INSTANCE,\n \"--username\",\n \"\",\n \"--password\",\n \"\",\n \"--output-dir\",\n tmp_dir,\n \"--start\",\n \"2020-01-01\",\n \"--end\",\n \"2020-03-01\",\n \"-ou\",\n \"VdXuxcNkiad\",\n \"-ou\",\n \"BNFrspDBKel\",\n \"-de\",\n \"l6byfWFUGaP\",\n \"-de\",\n \"Boy3QwztgeZ\",\n \"--no-aggregate\",\n \"--analytics\",\n ],\n )\n assert result.exit_code == 0\n assert \"analytics_raw_data.csv\" in os.listdir(tmp_dir)\n assert \"metadata.json\" in os.listdir(tmp_dir)\n\n\[email protected](reason=\"responses not mocked\")\ndef test_download_transform():\n runner = CliRunner()\n with tempfile.TemporaryDirectory() as tmp_dir:\n result = runner.invoke(\n dhis2extract.cli,\n [\n \"download\",\n \"--instance\",\n DHIS_INSTANCE,\n \"--username\",\n \"\",\n \"--password\",\n \"\",\n \"--output-dir\",\n tmp_dir,\n \"--start\",\n \"202001\",\n \"--end\",\n \"202003\",\n \"-ou\",\n \"VdXuxcNkiad\",\n \"-ou\",\n \"BNFrspDBKel\",\n \"-de\",\n \"l6byfWFUGaP\",\n \"-de\",\n \"Boy3QwztgeZ\",\n \"--aggregate\",\n \"--analytics\",\n ],\n )\n assert result.exit_code == 0\n\n result = runner.invoke(\n dhis2extract.cli, [\"transform\", \"-i\", tmp_dir, \"-o\", tmp_dir]\n )\n\n assert result.exit_code == 0\n assert \"extract.csv\" in os.listdir(tmp_dir)\n for fname in [\n \"organisation_units.gpkg\",\n \"categories.csv\",\n \"category_combos.csv\",\n \"category_options.csv\",\n \"category_option_combos.csv\",\n \"programs.csv\",\n \"datasets.csv\",\n \"indicator_groups.csv\",\n \"indicators.csv\",\n \"data_elements.csv\",\n \"organisation_unit_groups.csv\",\n \"organisation_units.csv\",\n ]:\n assert fname in os.listdir(os.path.join(tmp_dir, \"metadata\"))\n" ]
[ [ "pandas.notna", "pandas.read_csv" ] ]
zhouqiang06/ard-gap-filler
[ "98ee5688619f1ed3b9b8f0a9721b00c82219ccdb" ]
[ "test/ard_gap_filler_test.py" ]
[ "import ard_gap_filler as gf\r\n\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\n\r\nif __name__ == '__main__':\r\n\r\n ##############\r\n # Example of pixel time series fill\r\n # A quick and easy way to check the pixel results\r\n ######################\r\n work_dir = r'test/resources/'\r\n n_cpu = 1\r\n dates_list = np.load(os.path.join(work_dir, 'dates.npy'), allow_pickle=True)\r\n outname = os.path.join(work_dir, 'training_Atlanta')\r\n training_data_path = '{}.pkl'.format(outname)\r\n cluster_model_path = '{}.model'.format(outname)\r\n training_data = pd.read_pickle(training_data_path)\r\n date_idx = training_data.index.isin(dates_list)\r\n training_data = training_data[date_idx].values.T\r\n cluster_model = pickle.load(open(cluster_model_path, 'rb'))\r\n labels = cluster_model.labels_\r\n centroids = cluster_model.cluster_centers_\r\n centroids = centroids[:, date_idx]\r\n ts_org = np.load(r'test/resources/data/clear_full_2591.npy')\r\n ts_y = ts_org[800, :]\r\n gap_filled, gap_filled_std = gf.gap_fill_pixel(ts_y, training_data, labels, centroids)\r\n \r\n # plot the results #\r\n ts_y = np.ma.masked_array(ts_y, mask=(ts_y == 0))\r\n plt.scatter(dates_list, gap_filled, marker='o', alpha=0.7, label='Gap filled time series')\r\n plt.scatter(dates_list, ts_y, marker='+', alpha=1.0, label='Orginal time series')\r\n plt.legend()\r\n plt.show()\r\n #####################################################################\r\n\r\n ###################\r\n # Example of slice time series fill\r\n # This may take several hours depending on how many computing resource\r\n # #####################\r\n # work_dir = r'test/resources/'\r\n # outDir = r'test/resources/data_export'\r\n # if not os.path.exists(outDir):\r\n # os.mkdir(outDir)\r\n #\r\n # file_list = []\r\n # for name in glob.glob('test/resources/data/*.npy'):\r\n # file_list.append(name)\r\n #\r\n # n_cpu = 1\r\n # dates_list = np.load(os.path.join(work_dir, 'dates.npy'), allow_pickle=True)\r\n # outname = os.path.join(work_dir, 'training_Atlanta')\r\n # training_data_path = '{}.pkl'.format(outname)\r\n # cluster_model_path = '{}.model'.format(outname)\r\n # gf.fill_gaps_batch(file_list, dates_list, training_data_path, cluster_model_path, cpu=n_cpu, outDir=outDir)\r\n # # plot example result\r\n # dates_list = np.load(os.path.join(work_dir, 'dates.npy'), allow_pickle=True)\r\n # ts_org = np.load(os.path.join(work_dir, 'data', 'clear_full_2500.npy'))\r\n # ts_filled = np.load(os.path.join(outDir, 'clear_full_2500_gapfilled.npy'))\r\n # ts_org = np.ma.masked_array(ts_org, mask=(ts_org == 0))\r\n # print('ts_org[100, :], ', ts_org[100, :])\r\n # print(ts_org.shape, dates_list)\r\n # print('ts_filled[100, :], ', ts_filled[100, :])\r\n # plt.scatter(dates_list, ts_filled[100, :], marker='o', alpha=0.7, label='Gap filled time series')\r\n # plt.scatter(dates_list, ts_org[100, :], marker='+', alpha=1.0, label='Orginal time series')\r\n # plt.legend()\r\n # plt.show()\r\n ####################################################\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "numpy.ma.masked_array", "numpy.load", "pandas.read_pickle", "matplotlib.pyplot.show" ] ]
unkcpz/aiida-jdftx
[ "d733c1e8525a8cac882fc0252e50a93f84723b01" ]
[ "aiida_jdftx/workflows/base.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Workchain to run a JDFTx's jdftx calculation with automated error handling and restarts.\"\"\"\n\nfrom aiida import orm\nfrom aiida.engine import BaseRestartWorkChain, while_\nfrom aiida.plugins import CalculationFactory\nfrom aiida.common import AttributeDict\nfrom aiida.engine import calcfunction\n\nJdftxCalculation = CalculationFactory('jdftx')\n\n# -*- coding: utf-8 -*-\n\"\"\"Calculation function to compute a k-point mesh for a structure with a guaranteed minimum k-point distance.\"\"\"\n\n\n@calcfunction\ndef create_kpoints_from_distance(structure, distance, force_parity):\n \"\"\"Generate a uniformly spaced kpoint mesh for a given structure.\n The spacing between kpoints in reciprocal space is guaranteed to be at least the defined distance.\n :param structure: the StructureData to which the mesh should apply\n :param distance: a Float with the desired distance between kpoints in reciprocal space\n :param force_parity: a Bool to specify whether the generated mesh should maintain parity\n :returns: a KpointsData with the generated mesh\n \"\"\"\n from numpy import linalg\n from aiida.orm import KpointsData\n\n epsilon = 1E-5\n\n kpoints = KpointsData()\n kpoints.set_cell_from_structure(structure)\n kpoints.set_kpoints_mesh_from_density(distance.value,\n force_parity=force_parity.value)\n\n lengths_vector = [linalg.norm(vector) for vector in structure.cell]\n lengths_kpoint = kpoints.get_kpoints_mesh()[0]\n\n is_symmetric_cell = all(\n abs(length - lengths_vector[0]) < epsilon for length in lengths_vector)\n is_symmetric_mesh = all(length == lengths_kpoint[0]\n for length in lengths_kpoint)\n\n # If the vectors of the cell all have the same length, the kpoint mesh should be isotropic as well\n if is_symmetric_cell and not is_symmetric_mesh:\n nkpoints = max(lengths_kpoint)\n kpoints.set_kpoints_mesh([nkpoints, nkpoints, nkpoints])\n\n return kpoints\n\n\nclass JdftxBaseWorkChain(BaseRestartWorkChain):\n \"\"\"Workchain to run a JDFTx's jdftx calculation with automated error handling and restarts.\"\"\"\n\n _process_class = JdftxCalculation\n\n @classmethod\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n # yapf: disable\n super().define(spec)\n spec.expose_inputs(JdftxCalculation, namespace='jdftx', exclude=('kpoints',))\n spec.input('jdftx.metadata.options.resources', valid_type=dict, required=False)\n spec.input('kpoints', valid_type=orm.KpointsData, required=False,\n help='An explicit k-points list or mesh. Either this or `kpoints_distance` has to be provided.')\n spec.input('kpoints_distance', valid_type=orm.Float, required=False,\n help='The minimum desired distance in 1/Å between k-points in reciprocal space. The explicit k-points will '\n 'be generated automatically by a calculation function based on the input structure.')\n spec.input('kpoints_force_parity', valid_type=orm.Bool, required=False,\n help='Optional input when constructing the k-points based on a desired `kpoints_distance`. 
Setting this to '\n '`True` will force the k-point mesh to have an even number of points along each lattice vector except '\n 'for any non-periodic directions.')\n\n spec.outline(\n cls.setup,\n cls.validate_kpoints,\n while_(cls.should_run_process)(\n cls.run_process,\n cls.inspect_process,\n ),\n cls.results,\n )\n\n spec.expose_outputs(JdftxCalculation)\n\n spec.exit_code(202, 'ERROR_INVALID_INPUT_KPOINTS',\n message='Neither the `kpoints` nor the `kpoints_distance` input was specified.')\n\n def setup(self):\n \"\"\"Call the `setup` of the `BaseRestartWorkChain` and then create the inputs dictionary in `self.ctx.inputs`.\n\n This `self.ctx.inputs` dictionary will be used by the `BaseRestartWorkChain` to submit the calculations in the\n internal loop.\n \"\"\"\n super().setup()\n self.ctx.restart_calc = None\n self.ctx.inputs = AttributeDict(self.exposed_inputs(JdftxCalculation, 'jdftx'))\n\n def validate_kpoints(self):\n \"\"\"Validate the inputs related to k-points.\n Either an explicit `KpointsData` with given mesh/path, or a desired k-points distance should be specified. In\n the case of the latter, the `KpointsData` will be constructed for the input `StructureData` using the\n `create_kpoints_from_distance` calculation function.\n \"\"\"\n if all([key not in self.inputs for key in ['kpoints', 'kpoints_distance']]):\n return self.exit_codes.ERROR_INVALID_INPUT_KPOINTS # pylint: disable=no-member\n\n try:\n kpoints = self.inputs.kpoints\n except AttributeError:\n inputs = {\n 'structure': self.inputs.jdftx.structure,\n 'distance': self.inputs.kpoints_distance,\n 'force_parity': self.inputs.get('kpoints_force_parity', orm.Bool(False)),\n 'metadata': {\n 'call_link_label': 'create_kpoints_from_distance'\n }\n }\n kpoints = create_kpoints_from_distance(**inputs) # pylint: disable=unexpected-keyword-arg\n\n self.ctx.inputs.kpoints = kpoints\n" ]
[ [ "numpy.linalg.norm" ] ]
THU-DA-6D-Pose-Group/mx-DeepIM
[ "74b6df2e3f6be7d6fed23ba2f553dab5ae950700" ]
[ "lib/render_glumpy/render_py_multi.py" ]
[ "# --------------------------------------------------------\n# Deep Iterative Matching Network\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Written by Yi Li\n# --------------------------------------------------------\nfrom __future__ import print_function, division\nimport numpy as np\nfrom glumpy import app, gl, gloo, data, log\nimport logging\nimport os\nimport os.path as osp\nimport sys\n\ncur_dir = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(1, osp.join(cur_dir, '../..'))\n\nlog.setLevel(logging.ERROR) # ERROR, WARNING, DEBUG, INFO\nfrom lib.pair_matching.RT_transform import quat2mat\n\n\nclass Render_Py:\n vertex = \"\"\"\n uniform mat4 u_model; // Model matrix\n uniform mat4 u_view; // View matrix\n uniform mat4 u_projection; // Projection matrix\n attribute vec3 position;\n attribute vec2 texcoord;\n varying vec2 v_texcoord;\n\n void main()\n {\n // Assign varying variables\n v_texcoord = texcoord;\n\n // Final position\n gl_Position = u_projection * u_view * u_model * vec4(position, 1.0);\n }\n \"\"\"\n\n fragment = \"\"\"\n uniform sampler2D u_texture; // Texture\n varying vec2 v_texcoord; // Interpolated fragment texture coordinates (in)\n\n void main()\n {\n // Get texture color\n vec4 t_color = texture2D(u_texture, v_texcoord);\n\n // Final color\n gl_FragColor = t_color;\n }\n \"\"\"\n\n def __init__(self, model_dir, classes, K, width=640, height=480, zNear=0.25, zFar=6.0):\n self.width = width\n self.height = height\n self.zNear = zNear\n self.zFar = zFar\n self.K = K\n self.model_dir = model_dir\n\n self.rgb_buffer = np.zeros((self.height, self.width, 4), dtype=np.float32)\n self.depth_buffer = np.zeros((self.height, self.width), dtype=np.float32)\n\n log.info(\"Loading mesh\")\n self.render_kernel_list = [[] for cls in classes]\n self.classes = classes\n self.cls_idx = 0\n for class_id, cur_class in enumerate(classes):\n model_folder = os.path.join(model_dir, cur_class)\n print(\"Loading {}\".format(model_folder))\n vertices, indices = data.objload(\"{}/textured.obj\".format(model_folder), rescale=False)\n render_kernel = gloo.Program(self.vertex, self.fragment)\n render_kernel.bind(vertices)\n log.info(\"Loading texture\")\n render_kernel[\"u_texture\"] = np.copy(data.load(\"{}/texture_map.png\".format(model_folder))[::-1, :, :])\n\n render_kernel[\"u_model\"] = np.eye(4, dtype=np.float32)\n u_projection = self.my_compute_calib_proj(K, width, height, zNear, zFar)\n render_kernel[\"u_projection\"] = np.copy(u_projection)\n self.render_kernel_list[class_id] = render_kernel\n print(\"************Finish loading models*************\")\n\n self.window = app.Window(width=width, height=height, visible=False)\n print(\"self.window: \", self.window)\n print(\"self.render_kernel at init: \", self.render_kernel_list)\n\n @self.window.event\n def on_draw(dt):\n self.window.clear()\n gl.glDisable(gl.GL_BLEND)\n gl.glEnable(gl.GL_DEPTH_TEST)\n self.render_kernel_list[self.cls_idx].draw(gl.GL_TRIANGLES)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, self.rgb_buffer)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, self.depth_buffer)\n\n @self.window.event\n def on_init():\n gl.glEnable(gl.GL_DEPTH_TEST)\n\n def render(self, cls_idx, r, t, r_type=\"quat\", K=None):\n if r_type == \"quat\":\n R = quat2mat(r)\n elif r_type == \"mat\":\n R = r\n\n self.cls_idx = cls_idx\n self.render_kernel_list[cls_idx][\"u_view\"] = self._get_view_mtx(R, t)\n\n if K is not None:\n u_projection = 
self.my_compute_calib_proj(K, self.width, self.height, self.zNear, self.zFar)\n self.render_kernel_list[cls_idx][\"u_projection\"] = np.copy(u_projection)\n\n # import time\n # t = time.time()\n app.run(framecount=0, framerate=0)\n # print(\"render {} seconds/image\".format(time.time()-t))\n # app.run()\n\n rgb_gl = np.flipud(self.rgb_buffer)\n depth_gl = np.flipud(self.depth_buffer)\n\n bgr_gl = rgb_gl[:, :, [2, 1, 0]] # convert to BGR format as cv2\n bgr_gl *= 255\n\n depth_bg = depth_gl == 1\n depth_gl = 2 * self.zFar * self.zNear / (self.zFar + self.zNear - (self.zFar - self.zNear) * (2 * depth_gl - 1))\n depth_gl[depth_bg] = 0\n return bgr_gl, depth_gl\n\n def __del__(self):\n self.window.close()\n\n def my_compute_calib_proj(self, K, w, h, zNear, zFar):\n u0 = K[0, 2] + 0.5\n v0 = K[1, 2] + 0.5\n fu = K[0, 0]\n fv = K[1, 1]\n L = +(u0) * zNear / -fu\n T = +(v0) * zNear / fv\n R = -(w - u0) * zNear / -fu\n B = -(h - v0) * zNear / fv\n proj = np.zeros((4, 4))\n proj[0, 0] = 2 * zNear / (R - L)\n proj[1, 1] = 2 * zNear / (T - B)\n proj[2, 2] = -(zFar + zNear) / (zFar - zNear)\n proj[2, 0] = (R + L) / (R - L)\n proj[2, 1] = (T + B) / (T - B)\n proj[2, 3] = -1.0\n proj[3, 2] = -(2 * zFar * zNear) / (zFar - zNear)\n return proj\n\n def _get_view_mtx(self, R, t):\n u_view = np.eye(4, dtype=np.float32)\n u_view[:3, :3], u_view[:3, 3] = R, t.squeeze()\n yz_flip = np.eye(4, dtype=np.float32)\n yz_flip[1, 1], yz_flip[2, 2] = -1, -1\n u_view = yz_flip.dot(u_view) # OpenCV to OpenGL camera system\n u_view = u_view.T # OpenGL expects column-wise matrix format\n return u_view\n\n\nif __name__ == \"__main__\":\n import cv2\n import matplotlib.pyplot as plt\n\n def mat2quat(M):\n # Qyx refers to the contribution of the y input vector component to\n # the x output vector component. Qyx is therefore the same as\n # M[0,1]. 
The notation is from the Wikipedia article.\n Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat\n # Fill only lower half of symmetric matrix\n K = (\n np.array(\n [\n [Qxx - Qyy - Qzz, 0, 0, 0],\n [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0],\n [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0],\n [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz],\n ]\n )\n / 3.0\n )\n # Use Hermitian eigenvectors, values for speed\n vals, vecs = np.linalg.eigh(K)\n # Select largest eigenvector, reorder to w,x,y,z quaternion\n q = vecs[[3, 0, 1, 2], np.argmax(vals)]\n if q[0] < 0:\n q *= -1\n return q\n\n classes = [\"driller\"] # '002_master_chef_can'\n model_dir = os.path.join(cur_dir, \"../../data/LINEMOD_6D/LM6d_converted/LM6d_refine/models/\")\n pose_path = os.path.join(\n cur_dir, \"../../data/LINEMOD_6D/LM6d_converted/LM6d_refine/data/gt_observed/{}/{}-pose.txt\"\n )\n color_path = os.path.join(\n cur_dir, \"../../data/LINEMOD_6D/LM6d_converted/LM6d_refine/data/gt_observed/{}/{}-color.png\"\n )\n depth_path = os.path.join(\n cur_dir, \"../../data/LINEMOD_6D/LM6d_converted/LM6d_refine/data/gt_observed/{}/{}-depth.png\"\n )\n K = np.array([[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0, 1.0]])\n\n width = 640\n height = 480\n ZNEAR = 0.25\n ZFAR = 6.0\n img_idx_list = [\"000001\", \"000001\"]\n\n render_machine = Render_Py(model_dir, classes, K, width, height, ZNEAR, ZFAR)\n for idx in range(len(classes)):\n # warm up\n bgr_gl, _ = render_machine.render(idx, (0.5, 0.5, 0.5, 0.5), np.array([0, 0, 1]))\n bgr_gl = bgr_gl.astype(np.uint8)\n fig = plt.figure()\n fig.add_subplot(2, 3, 1)\n plt.axis('off')\n plt.imshow(bgr_gl[:, :, [2, 1, 0]])\n plt.show()\n\n cur_class = classes[idx]\n cur_img_idx = img_idx_list[idx]\n pose_real_est = np.loadtxt(pose_path.format(cur_class, cur_img_idx), skiprows=1)\n r_quat = mat2quat(pose_real_est[:, :3])\n t = pose_real_est[:, 3]\n import time\n\n start_t = time.time()\n bgr_gl, depth_gl = render_machine.render(idx, r_quat, t)\n print(\"using {} seconds\".format(time.time() - start_t))\n bgr_gl = bgr_gl.astype(np.uint8)\n c = np.dot(K, t)\n c_x = int(round(c[0] / c[2]))\n c_y = int(round(c[1] / c[2]))\n bgr_gl[c_y, c_x, :] = np.array([255, 0, 0])\n\n bgr_pa = cv2.imread(color_path.format(cur_class, cur_img_idx), cv2.IMREAD_COLOR)\n depth_pa = (\n cv2.imread(depth_path.format(cur_class, cur_img_idx), cv2.IMREAD_UNCHANGED).astype(np.float32) / 1000.0\n )\n\n fig = plt.figure()\n fig.add_subplot(2, 3, 1)\n plt.imshow(bgr_gl[:, :, [2, 1, 0]])\n plt.axis('off')\n\n fig.add_subplot(2, 3, 2)\n plt.imshow(bgr_pa[:, :, [2, 1, 0]])\n plt.axis('off')\n\n fig.add_subplot(2, 3, 3)\n plt.imshow(bgr_gl - bgr_pa)\n plt.axis('off')\n\n fig.add_subplot(2, 3, 4)\n plt.imshow(depth_gl)\n plt.axis('off')\n\n fig.add_subplot(2, 3, 5)\n plt.imshow(depth_pa)\n plt.axis('off')\n\n fig.add_subplot(2, 3, 6)\n plt.imshow(depth_gl - depth_pa)\n plt.axis('off')\n\n plt.show()\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.imshow", "numpy.eye", "numpy.flipud", "numpy.copy", "numpy.linalg.eigh", "numpy.argmax", "matplotlib.pyplot.axis", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
act-elegancy/consumet
[ "291eb6ad1cfbb1ca4f04ed3bc7a97c05783183b0" ]
[ "doc/examples/rosenbrock/plot_csv.py" ]
[ "#!/usr/bin/env python\n\n'''\nThis file demonstrates how to import surrgate \nmodels by loading the `regression.csv` file.\n'''\n\n########################################\n# Recreate the surrogate model\n########################################\n\n# Load regression coefficients from file\nimport numpy as np\ndata = np.genfromtxt('regression.csv', delimiter=',')\n\n# Reimplement the polynomial surrogate model\ndef rosenbrock(x, y):\n return sum([θ * x**n * y**m for _, n, m, θ in data])\n\n\n########################################\n# Evaluate surrogate models\n########################################\n\n# Make a 200×200 coordinate grid\nxs = np.linspace(0, 1, 200)\nys = np.linspace(0, 1, 200)\n\n# Evaluate the function on grid\nzs = np.zeros((200, 200))\nfor i, x in enumerate(xs):\n for j, y in enumerate(ys):\n zs[j,i] = rosenbrock(x, y)\n\n\n########################################\n# Plot surrogate models\n########################################\n\n# Load plotting library\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n# Convert coordinates into mesh\nxs, ys = np.meshgrid(xs, ys)\n\n# Make a new figure with 3d axes\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.view_init(45, -125)\n\n# Plot the results\nax.plot_surface(xs, ys, zs)\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.genfromtxt", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
WangYuanZhiWang/DualGCN-ABSA
[ "2ae75fc9da3200609f8ae7ea10e1f8ef4988f95b" ]
[ "DualGCN/models/dualgcn.py" ]
[ "'''\nDescription: \nversion: \nAuthor: chenhao\nDate: 2021-06-09 14:17:37\n\n重点看一下这个普通DualGCN模型,跟他论文里的那个图对应上\n'''\nimport copy\nimport math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom tree import head_to_tree, tree_to_adj\n\n\nclass DualGCNClassifier(nn.Module): # 这个是模型主体\n def __init__(self, embedding_matrix, opt):\n super().__init__()\n in_dim = opt.hidden_dim\n self.opt = opt\n self.gcn_model = GCNAbsaModel(embedding_matrix=embedding_matrix, opt=opt)\n self.classifier = nn.Linear(in_dim*2, opt.polarities_dim) # 看来absamodel包含对句子向量的所有处理\n\n def forward(self, inputs):\n outputs1, outputs2, adj_ag, adj_dep = self.gcn_model(inputs) # absamodel输出两个向量\n final_outputs = torch.cat((outputs1, outputs2), dim=-1) # 拼一下然后做分类\n logits = self.classifier(final_outputs)\n\n # 他这些正则处理时基于正交假设和差异愿望的,目的是修正注意力矩阵,都是对semGCN的操作\n # 但是最终形式上都体现在损失函数上面\n # 不同层还有积累两种正则化参数的需要\n adj_ag_T = adj_ag.transpose(1, 2)\n identity = torch.eye(adj_ag.size(1)).cuda()\n identity = identity.unsqueeze(0).expand(adj_ag.size(0), adj_ag.size(1), adj_ag.size(1))\n ortho = adj_ag@adj_ag_T\n\n for i in range(ortho.size(0)):\n ortho[i] -= torch.diag(torch.diag(ortho[i]))\n ortho[i] += torch.eye(ortho[i].size(0)).cuda()\n\n penal = None\n if self.opt.losstype == 'doubleloss':\n penal1 = (torch.norm(ortho - identity) / adj_ag.size(0)).cuda()\n penal2 = (adj_ag.size(0) / torch.norm(adj_ag - adj_dep)).cuda()\n penal = self.opt.alpha * penal1 + self.opt.beta * penal2\n \n elif self.opt.losstype == 'orthogonalloss':\n penal = (torch.norm(ortho - identity) / adj_ag.size(0)).cuda()\n penal = self.opt.alpha * penal\n\n elif self.opt.losstype == 'differentiatedloss':\n penal = (adj_ag.size(0) / torch.norm(adj_ag - adj_dep)).cuda()\n penal = self.opt.beta * penal\n \n return logits, penal\n\n\nclass GCNAbsaModel(nn.Module):\n def __init__(self, embedding_matrix, opt):\n super().__init__()\n self.opt = opt\n self.embedding_matrix = embedding_matrix\n self.emb = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float), freeze=True)\n self.pos_emb = nn.Embedding(opt.pos_size, opt.pos_dim, padding_idx=0) if opt.pos_dim > 0 else None # POS emb\n self.post_emb = nn.Embedding(opt.post_size, opt.post_dim, padding_idx=0) if opt.post_dim > 0 else None # position emb\n embeddings = (self.emb, self.pos_emb, self.post_emb)\n # gcn layer\n self.gcn = GCN(opt, embeddings, opt.hidden_dim, opt.num_layers)\n\n def forward(self, inputs):\n tok, asp, pos, head, deprel, post, mask, l, adj = inputs # unpack inputs\n maxlen = max(l.data)\n mask = mask[:, :maxlen]\n if self.opt.parseadj:\n adj_dep = adj[:, :maxlen, :maxlen].float()\n else:\n def inputs_to_tree_reps(head, words, l):\n trees = [head_to_tree(head[i], words[i], l[i]) for i in range(len(l))]\n adj = [tree_to_adj(maxlen, tree, directed=self.opt.direct, self_loop=self.opt.loop).reshape(1, maxlen, maxlen) for tree in trees]\n adj = np.concatenate(adj, axis=0)\n adj = torch.from_numpy(adj)\n return adj.cuda()\n adj_dep = inputs_to_tree_reps(head.data, tok.data, l.data)\n\n h1, h2, adj_ag = self.gcn(adj_dep, inputs)\n # avg pooling asp feature\n asp_wn = mask.sum(dim=1).unsqueeze(-1) # aspect words num\n mask = mask.unsqueeze(-1).repeat(1,1,self.opt.hidden_dim) # mask for h\n outputs1 = (h1*mask).sum(dim=1) / asp_wn\n outputs2 = (h2*mask).sum(dim=1) / asp_wn\n \n return outputs1, outputs2, adj_ag, adj_dep\n\nclass GCN(nn.Module):\n def __init__(self, opt, embeddings, mem_dim, num_layers):\n super(GCN, 
self).__init__()\n self.opt = opt\n self.layers = num_layers\n self.mem_dim = mem_dim\n self.in_dim = opt.embed_dim+opt.post_dim+opt.pos_dim\n self.emb, self.pos_emb, self.post_emb = embeddings\n\n # rnn layer\n input_size = self.in_dim\n self.rnn = nn.LSTM(input_size, opt.rnn_hidden, opt.rnn_layers, batch_first=True, \\\n dropout=opt.rnn_dropout, bidirectional=opt.bidirect)\n if opt.bidirect:\n self.in_dim = opt.rnn_hidden * 2\n else:\n self.in_dim = opt.rnn_hidden\n\n # drop out\n self.rnn_drop = nn.Dropout(opt.rnn_dropout)\n self.in_drop = nn.Dropout(opt.input_dropout)\n self.gcn_drop = nn.Dropout(opt.gcn_dropout)\n\n # gcn layer\n self.W = nn.ModuleList()\n for layer in range(self.layers):\n input_dim = self.in_dim if layer == 0 else self.mem_dim\n self.W.append(nn.Linear(input_dim, self.mem_dim))\n\n self.attention_heads = opt.attention_heads\n self.attn = MultiHeadAttention(self.attention_heads, self.mem_dim*2)\n self.weight_list = nn.ModuleList()\n for j in range(self.layers):\n input_dim = self.in_dim if j == 0 else self.mem_dim\n self.weight_list.append(nn.Linear(input_dim, self.mem_dim))\n\n self.affine1 = nn.Parameter(torch.Tensor(self.mem_dim, self.mem_dim))\n self.affine2 = nn.Parameter(torch.Tensor(self.mem_dim, self.mem_dim))\n\n def encode_with_rnn(self, rnn_inputs, seq_lens, batch_size):\n h0, c0 = rnn_zero_state(batch_size, self.opt.rnn_hidden, self.opt.rnn_layers, self.opt.bidirect)\n rnn_inputs = nn.utils.rnn.pack_padded_sequence(rnn_inputs, seq_lens, batch_first=True, enforce_sorted=False)\n rnn_outputs, (ht, ct) = self.rnn(rnn_inputs, (h0, c0))\n rnn_outputs, _ = nn.utils.rnn.pad_packed_sequence(rnn_outputs, batch_first=True)\n return rnn_outputs\n\n def forward(self, adj, inputs):\n tok, asp, pos, head, deprel, post, mask, l, _ = inputs # unpack inputs\n src_mask = (tok != 0).unsqueeze(-2)\n maxlen = max(l.data)\n mask_ = (torch.zeros_like(tok) != tok).float().unsqueeze(-1)[:, :maxlen]\n\n # embedding\n word_embs = self.emb(tok)\n embs = [word_embs]\n if self.opt.pos_dim > 0:\n embs += [self.pos_emb(pos)]\n if self.opt.post_dim > 0:\n embs += [self.post_emb(post)]\n embs = torch.cat(embs, dim=2)\n embs = self.in_drop(embs)\n\n # rnn layer\n self.rnn.flatten_parameters()\n gcn_inputs = self.rnn_drop(self.encode_with_rnn(embs, l, tok.size()[0]))\n \n denom_dep = adj.sum(2).unsqueeze(2) + 1\n attn_tensor = self.attn(gcn_inputs, gcn_inputs, src_mask)\n attn_adj_list = [attn_adj.squeeze(1) for attn_adj in torch.split(attn_tensor, 1, dim=1)]\n outputs_dep = None\n adj_ag = None\n\n # * Average Multi-head Attention matrixes\n for i in range(self.attention_heads):\n if adj_ag is None:\n adj_ag = attn_adj_list[i]\n else:\n adj_ag += attn_adj_list[i]\n adj_ag /= self.attention_heads\n\n for j in range(adj_ag.size(0)):\n adj_ag[j] -= torch.diag(torch.diag(adj_ag[j]))\n adj_ag[j] += torch.eye(adj_ag[j].size(0)).cuda()\n adj_ag = mask_ * adj_ag\n\n denom_ag = adj_ag.sum(2).unsqueeze(2) + 1\n outputs_ag = gcn_inputs\n outputs_dep = gcn_inputs\n\n for l in range(self.layers):\n # ************SynGCN*************\n Ax_dep = adj.bmm(outputs_dep)\n AxW_dep = self.W[l](Ax_dep)\n AxW_dep = AxW_dep / denom_dep\n gAxW_dep = F.relu(AxW_dep)\n\n # ************SemGCN*************\n Ax_ag = adj_ag.bmm(outputs_ag)\n AxW_ag = self.weight_list[l](Ax_ag)\n AxW_ag = AxW_ag / denom_ag\n gAxW_ag = F.relu(AxW_ag)\n\n # * mutual Biaffine module\n A1 = F.softmax(torch.bmm(torch.matmul(gAxW_dep, self.affine1), torch.transpose(gAxW_ag, 1, 2)), dim=-1)\n A2 = F.softmax(torch.bmm(torch.matmul(gAxW_ag, 
self.affine2), torch.transpose(gAxW_dep, 1, 2)), dim=-1)\n gAxW_dep, gAxW_ag = torch.bmm(A1, gAxW_ag), torch.bmm(A2, gAxW_dep)\n outputs_dep = self.gcn_drop(gAxW_dep) if l < self.layers - 1 else gAxW_dep\n outputs_ag = self.gcn_drop(gAxW_ag) if l < self.layers - 1 else gAxW_ag\n\n return outputs_ag, outputs_dep, adj_ag\n\n\ndef rnn_zero_state(batch_size, hidden_dim, num_layers, bidirectional=True):\n total_layers = num_layers * 2 if bidirectional else num_layers\n state_shape = (total_layers, batch_size, hidden_dim)\n h0 = c0 = Variable(torch.zeros(*state_shape), requires_grad=False)\n return h0.cuda(), c0.cuda()\n\n\ndef attention(query, key, mask=None, dropout=None):\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n\n p_attn = F.softmax(scores, dim=-1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n\n return p_attn\n\n\ndef clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass MultiHeadAttention(nn.Module):\n\n def __init__(self, h, d_model, dropout=0.1):\n super(MultiHeadAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 2)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, mask=None):\n mask = mask[:, :, :query.size(1)]\n if mask is not None:\n mask = mask.unsqueeze(1)\n \n nbatches = query.size(0)\n query, key = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key))]\n\n attn = attention(query, key, mask=mask, dropout=self.dropout)\n return attn\n" ]
[ [ "torch.nn.functional.softmax", "torch.transpose", "torch.cat", "torch.zeros", "torch.nn.Embedding", "numpy.concatenate", "torch.nn.utils.rnn.pad_packed_sequence", "torch.split", "torch.nn.Dropout", "torch.norm", "torch.from_numpy", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "torch.nn.functional.relu", "torch.bmm", "torch.nn.ModuleList", "torch.zeros_like", "torch.nn.Linear", "torch.diag", "torch.Tensor", "torch.nn.LSTM", "torch.matmul" ] ]
jameshcorbett/parsl
[ "2475a4c5743f3336967c5fe48b84f497336780fe" ]
[ "parsl/monitoring/visualization/plots/default/workflow_resource_plots.py" ]
[ "import math\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom plotly.offline import plot\n\n\ndef resource_distribution_plot(df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg', columns=20,):\n # E.g., psutil_process_time_user or psutil_process_memory_percent\n\n min_range = min(df_resources[type].astype('float'))\n max_range = max(df_resources[type].astype('float'))\n time_step = (max_range - min_range) / columns\n\n if min_range == max_range:\n x_axis = [min_range]\n else:\n x_axis = []\n for i in np.arange(min_range, max_range + time_step, time_step):\n x_axis.append(i)\n\n apps_dict = dict()\n for i in range(len(df_task)):\n row = df_task.iloc[i]\n apps_dict[row['task_id']] = []\n\n def y_axis_setup():\n items = [0] * len(x_axis)\n\n for app, tasks in apps_dict.items():\n if option == 'avg':\n task = df_resources[df_resources['task_id'] ==\n app][type].astype('float').mean()\n elif option == 'max':\n task = df_resources[df_resources['task_id'] == app][type].astype('float').max()\n\n for i in range(len(x_axis) - 1):\n a = task >= x_axis[i]\n b = task < x_axis[i + 1]\n if a and b:\n items[i] += 1\n break\n if task >= x_axis[-1]:\n items[-1] += 1\n return items\n\n if \"memory\" not in type:\n xaxis = dict(autorange=True,\n title='CPU user time (seconds)')\n else:\n xaxis = dict(autorange=True,\n title='Memory usage (bytes)')\n fig = go.Figure(\n data=[go.Bar(x=x_axis,\n y=y_axis_setup(),\n name='tasks')],\n layout=go.Layout(xaxis=xaxis,\n yaxis=dict(title='Tasks'),\n title=label + '(' + option + ')'))\n\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef resource_time_series(tasks, type='psutil_process_time_user', label='CPU user time'):\n tasks['epoch_time'] = (pd.to_datetime(\n tasks['timestamp']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n step = int(tasks['resource_monitoring_interval'][0])\n start = tasks['epoch_time'].min()\n end = tasks['epoch_time'].max()\n tasks['relative_time'] = tasks['epoch_time'] - start\n if end != start:\n bins = pd.cut(tasks['relative_time'],\n range(0, end - start + 1, step),\n include_lowest=True)\n df = tasks.groupby(bins, as_index=False)[type].mean()\n df['time'] = step * df.index\n fig = go.Figure(\n data=[go.Scatter(x=df['time'],\n y=df[type],\n )],\n layout=go.Layout(xaxis=dict(autorange=True,\n title='Time (seconds)'),\n yaxis=dict(title=label),\n title=label))\n else:\n fig = go.Figure(\n data=[go.Scatter(x=[0],\n y=[tasks[type].mean()],\n )],\n layout=go.Layout(xaxis=dict(autorange=True,\n title='Time (seconds)'),\n yaxis=dict(title=label),\n title=label))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef worker_efficiency(task, node):\n try:\n node['epoch_time'] = (pd.to_datetime(\n node['reg_time']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n task['epoch_time_start'] = (pd.to_datetime(\n task['task_try_time_launched']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n task['epoch_time_running'] = (pd.to_datetime(\n task['task_try_time_running']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n task['epoch_time_returned'] = (pd.to_datetime(\n task['task_try_time_returned']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n start = int(min(task['epoch_time_start'].min(), node['epoch_time'].min()))\n end = int(task['epoch_time_returned'].max())\n\n worker_plot = [0] * (end - start + 1)\n total_workers = node['worker_count'].sum()\n\n for i, row in 
task.iterrows():\n if math.isnan(row['epoch_time_running']):\n # skip tasks with no running start time.\n continue\n for j in range(int(row['epoch_time_running']), int(row['epoch_time_returned']) + 1):\n worker_plot[j - start] += 1\n fig = go.Figure(\n data=[go.Scatter(x=list(range(0, end - start + 1)),\n y=worker_plot,\n name='Total busy workers',\n ),\n go.Scatter(x=list(range(0, end - start + 1)),\n y=[total_workers] * (end - start + 1),\n name='Total online workers',\n )\n ],\n layout=go.Layout(xaxis=dict(autorange=True,\n title='Time (seconds)'),\n yaxis=dict(title='Number of workers'),\n title=\"Worker efficiency\"))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n except Exception as e:\n print(e)\n return \"The worker efficiency plot cannot be generated due to missing data.\"\n\n\ndef resource_efficiency(resource, node, label='CPU'):\n try:\n resource['epoch_time'] = (pd.to_datetime(\n resource['timestamp']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n node['epoch_time'] = (pd.to_datetime(\n node['reg_time']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n resource = resource.sort_values(by='epoch_time')\n start = min(resource['epoch_time'].min(), node['epoch_time'].min())\n end = resource['epoch_time'].max()\n resource['relative_time'] = resource['epoch_time'] - start\n node['relative_time'] = node['epoch_time'] - start\n\n task_plot = [0] * (end - start + 1)\n if label == 'CPU':\n total = node['cpu_count'].sum()\n elif label == 'mem':\n total = node['total_memory'].sum() / 1024 / 1024 / 1024\n\n resource['total_cpu_time'] = resource['psutil_process_time_user'] + resource['psutil_process_time_system']\n for task_id in resource['task_id'].unique():\n tmp = resource[resource['task_id'] == task_id]\n tmp['last_timestamp'] = tmp['relative_time'].shift(1)\n if label == 'CPU':\n tmp['last_cputime'] = tmp['total_cpu_time'].shift(1)\n for index, row in tmp.iterrows():\n if np.isnan(row['last_timestamp']):\n continue\n for i in range(int(row['last_timestamp']), int(row['relative_time'])):\n if label == 'CPU':\n diff = (row['total_cpu_time'] - row['last_cputime']) / (row['relative_time'] - row['last_timestamp'])\n elif label == 'mem':\n diff = row['psutil_process_memory_resident'] / 1024 / 1024 / 1024\n task_plot[i] += diff\n\n if label == 'CPU':\n name1 = 'Used CPU cores'\n name2 = 'Total CPU cores'\n yaxis = 'Number of CPU cores'\n title = 'CPU usage'\n elif label == 'mem':\n name1 = 'Used memory'\n name2 = 'Total memory'\n yaxis = 'Memory (GB)'\n title = 'Memory usage'\n\n fig = go.Figure(\n data=[go.Scatter(x=list(range(0, end - start + 1)),\n y=task_plot,\n name=name1,\n ),\n go.Scatter(x=list(range(0, end - start + 1)),\n y=[total] * (end - start + 1),\n name=name2,\n )\n ],\n layout=go.Layout(xaxis=dict(autorange=True,\n title='Time (seconds)'),\n yaxis=dict(title=yaxis),\n title=title))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n except Exception as e:\n print(e)\n return \"The resource efficiency plot cannot be generated because of exception {}.\".format(e)\n" ]
[ [ "pandas.to_datetime", "numpy.isnan", "numpy.arange", "pandas.Timedelta", "pandas.Timestamp" ] ]
lwschm/EconML
[ "6e7b107e1f8a7a5922489eb81143db8656ff01af" ]
[ "econml/tests/test_dml.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport unittest\nimport pytest\nimport pickle\nfrom sklearn.linear_model import LinearRegression, Lasso, LassoCV, LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder, FunctionTransformer, PolynomialFeatures\nfrom sklearn.model_selection import KFold, GroupKFold\nfrom econml.dml import DML, LinearDML, SparseLinearDML, KernelDML, CausalForestDML\nfrom econml.dml import NonParamDML\nimport numpy as np\nfrom econml.utilities import shape, hstack, vstack, reshape, cross_product\nfrom econml.inference import BootstrapInference, EmpiricalInferenceResults, NormalInferenceResults\nfrom contextlib import ExitStack\nfrom sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier\nimport itertools\nfrom econml.sklearn_extensions.linear_model import WeightedLasso, StatsModelsRLM, StatsModelsLinearRegression\nfrom econml.tests.test_statsmodels import _summarize\nimport econml.tests.utilities # bugfix for assertWarns\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\n\n# all solutions to underdetermined (or exactly determined) Ax=b are given by A⁺b+(I-A⁺A)w for some arbitrary w\n# note that if Ax=b is overdetermined, this will raise an assertion error\n\n\ndef rand_sol(A, b):\n \"\"\"Generate a random solution to the equation Ax=b.\"\"\"\n assert np.linalg.matrix_rank(A) <= len(b)\n A_plus = np.linalg.pinv(A)\n x = A_plus @ b\n return x + (np.eye(x.shape[0]) - A_plus @ A) @ np.random.normal(size=x.shape)\n\n\[email protected]\nclass TestDML(unittest.TestCase):\n\n def test_cate_api(self):\n \"\"\"Test that we correctly implement the CATE API.\"\"\"\n n_c = 20 # number of rows for continuous models\n n_d = 30 # number of rows for discrete models\n\n def make_random(n, is_discrete, d):\n if d is None:\n return None\n sz = (n, d) if d >= 0 else (n,)\n if is_discrete:\n while True:\n arr = np.random.choice(['a', 'b', 'c'], size=sz)\n # ensure that we've got at least 6 of every element\n # 2 outer splits, 3 inner splits when model_t is 'auto' and treatment is discrete\n # NOTE: this number may need to change if the default number of folds in\n # WeightedStratifiedKFold changes\n _, counts = np.unique(arr, return_counts=True)\n if len(counts) == 3 and counts.min() > 5:\n return arr\n else:\n return np.random.normal(size=sz)\n\n for d_t in [2, 1, -1]:\n for is_discrete in [True, False] if d_t <= 1 else [False]:\n for d_y in [3, 1, -1]:\n for d_x in [2, None]:\n for d_w in [2, None]:\n n = n_d if is_discrete else n_c\n W, X, Y, T = [make_random(n, is_discrete, d)\n for is_discrete, d in [(False, d_w),\n (False, d_x),\n (False, d_y),\n (is_discrete, d_t)]]\n\n for featurizer, fit_cate_intercept in\\\n [(None, True),\n (PolynomialFeatures(degree=2, include_bias=False), True),\n (PolynomialFeatures(degree=2, include_bias=True), False)]:\n\n d_t_final = 2 if is_discrete else d_t\n\n effect_shape = (n,) + ((d_y,) if d_y > 0 else ())\n effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1), 6)\n marginal_effect_shape = ((n,) +\n ((d_y,) if d_y > 0 else ()) +\n ((d_t_final,) if d_t_final > 0 else ()))\n marginal_effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1) *\n (d_t_final if d_t_final > 0 else 1), 6)\n\n # since T isn't passed to const_marginal_effect, defaults to one row if X is None\n const_marginal_effect_shape = ((n if d_x else 1,) +\n ((d_y,) if d_y > 0 else ()) +\n ((d_t_final,) if d_t_final > 0 
else()))\n const_marginal_effect_summaryframe_shape = (\n (n if d_x else 1) * (d_y if d_y > 0 else 1) *\n (d_t_final if d_t_final > 0 else 1), 6)\n\n fd_x = featurizer.fit_transform(X).shape[1:] if featurizer and d_x\\\n else ((d_x,) if d_x else (0,))\n coef_shape = Y.shape[1:] + (T.shape[1:] if not is_discrete else (2,)) + fd_x\n\n coef_summaryframe_shape = (\n (d_y if d_y > 0 else 1) * (fd_x[0] if fd_x[0] >\n 0 else 1) * (d_t_final if d_t_final > 0 else 1), 6)\n intercept_shape = Y.shape[1:] + (T.shape[1:] if not is_discrete else (2,))\n intercept_summaryframe_shape = (\n (d_y if d_y > 0 else 1) * (d_t_final if d_t_final > 0 else 1), 6)\n\n model_t = LogisticRegression() if is_discrete else Lasso()\n\n all_infs = [None, 'auto', BootstrapInference(2)]\n\n for est, multi, infs in\\\n [(DML(model_y=Lasso(),\n model_t=model_t,\n model_final=Lasso(alpha=0.1, fit_intercept=False),\n featurizer=featurizer,\n fit_cate_intercept=fit_cate_intercept,\n discrete_treatment=is_discrete),\n True,\n [None] +\n ([BootstrapInference(n_bootstrap_samples=20)] if not is_discrete else [])),\n (DML(model_y=Lasso(),\n model_t=model_t,\n model_final=StatsModelsRLM(fit_intercept=False),\n featurizer=featurizer,\n fit_cate_intercept=fit_cate_intercept,\n discrete_treatment=is_discrete),\n True,\n ['auto']),\n (LinearDML(model_y=Lasso(),\n model_t='auto',\n featurizer=featurizer,\n fit_cate_intercept=fit_cate_intercept,\n discrete_treatment=is_discrete),\n True,\n all_infs),\n (SparseLinearDML(model_y=WeightedLasso(),\n model_t=model_t,\n featurizer=featurizer,\n fit_cate_intercept=fit_cate_intercept,\n discrete_treatment=is_discrete),\n True,\n [None, 'auto'] +\n ([BootstrapInference(n_bootstrap_samples=20)] if not is_discrete else [])),\n (KernelDML(model_y=WeightedLasso(),\n model_t=model_t,\n fit_cate_intercept=fit_cate_intercept,\n discrete_treatment=is_discrete),\n False,\n [None]),\n (CausalForestDML(model_y=WeightedLasso(),\n model_t=model_t,\n featurizer=featurizer,\n n_estimators=4,\n n_jobs=1,\n discrete_treatment=is_discrete),\n True,\n ['auto', 'blb'])]:\n\n if not(multi) and d_y > 1:\n continue\n\n if X is None and isinstance(est, CausalForestDML):\n continue\n\n # ensure we can serialize the unfit estimator\n pickle.dumps(est)\n\n for inf in infs:\n with self.subTest(d_w=d_w, d_x=d_x, d_y=d_y, d_t=d_t,\n is_discrete=is_discrete, est=est, inf=inf):\n\n if X is None and (not fit_cate_intercept):\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X, W=W, inference=inf)\n continue\n\n est.fit(Y, T, X=X, W=W, inference=inf)\n\n # ensure we can pickle the fit estimator\n pickle.dumps(est)\n\n # make sure we can call the marginal_effect and effect methods\n const_marg_eff = est.const_marginal_effect(X)\n marg_eff = est.marginal_effect(T, X)\n self.assertEqual(shape(marg_eff), marginal_effect_shape)\n self.assertEqual(shape(const_marg_eff), const_marginal_effect_shape)\n\n np.testing.assert_allclose(\n marg_eff if d_x else marg_eff[0:1], const_marg_eff)\n\n assert isinstance(est.score_, float)\n for score in est.nuisance_scores_y:\n assert isinstance(score, float)\n for score in est.nuisance_scores_t:\n assert isinstance(score, float)\n\n T0 = np.full_like(T, 'a') if is_discrete else np.zeros_like(T)\n eff = est.effect(X, T0=T0, T1=T)\n self.assertEqual(shape(eff), effect_shape)\n\n if ((not isinstance(est, KernelDML)) and\n (not isinstance(est, CausalForestDML))):\n self.assertEqual(shape(est.coef_), coef_shape)\n if fit_cate_intercept:\n self.assertEqual(shape(est.intercept_), intercept_shape)\n 
else:\n with pytest.raises(AttributeError):\n self.assertEqual(shape(est.intercept_), intercept_shape)\n\n if inf is not None:\n const_marg_eff_int = est.const_marginal_effect_interval(X)\n marg_eff_int = est.marginal_effect_interval(T, X)\n self.assertEqual(shape(marg_eff_int),\n (2,) + marginal_effect_shape)\n self.assertEqual(shape(const_marg_eff_int),\n (2,) + const_marginal_effect_shape)\n self.assertEqual(shape(est.effect_interval(X, T0=T0, T1=T)),\n (2,) + effect_shape)\n if ((not isinstance(est, KernelDML)) and\n (not isinstance(est, CausalForestDML))):\n self.assertEqual(shape(est.coef__interval()),\n (2,) + coef_shape)\n if fit_cate_intercept:\n self.assertEqual(shape(est.intercept__interval()),\n (2,) + intercept_shape)\n else:\n with pytest.raises(AttributeError):\n self.assertEqual(shape(est.intercept__interval()),\n (2,) + intercept_shape)\n\n const_marg_effect_inf = est.const_marginal_effect_inference(X)\n T1 = np.full_like(T, 'b') if is_discrete else T\n effect_inf = est.effect_inference(X, T0=T0, T1=T1)\n marg_effect_inf = est.marginal_effect_inference(T, X)\n # test const marginal inference\n self.assertEqual(shape(const_marg_effect_inf.summary_frame()),\n const_marginal_effect_summaryframe_shape)\n self.assertEqual(shape(const_marg_effect_inf.point_estimate),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.stderr),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.var),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.pvalue()),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.zstat()),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.conf_int()),\n (2,) + const_marginal_effect_shape)\n np.testing.assert_array_almost_equal(\n const_marg_effect_inf.conf_int()[0],\n const_marg_eff_int[0], decimal=5)\n const_marg_effect_inf.population_summary()._repr_html_()\n\n # test effect inference\n self.assertEqual(shape(effect_inf.summary_frame()),\n effect_summaryframe_shape)\n self.assertEqual(shape(effect_inf.point_estimate),\n effect_shape)\n self.assertEqual(shape(effect_inf.stderr),\n effect_shape)\n self.assertEqual(shape(effect_inf.var),\n effect_shape)\n self.assertEqual(shape(effect_inf.pvalue()),\n effect_shape)\n self.assertEqual(shape(effect_inf.zstat()),\n effect_shape)\n self.assertEqual(shape(effect_inf.conf_int()),\n (2,) + effect_shape)\n np.testing.assert_array_almost_equal(\n effect_inf.conf_int()[0],\n est.effect_interval(X, T0=T0, T1=T1)[0], decimal=5)\n effect_inf.population_summary()._repr_html_()\n\n # test marginal effect inference\n self.assertEqual(shape(marg_effect_inf.summary_frame()),\n marginal_effect_summaryframe_shape)\n self.assertEqual(shape(marg_effect_inf.point_estimate),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.stderr),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.var),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.pvalue()),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.zstat()),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.conf_int()),\n (2,) + marginal_effect_shape)\n np.testing.assert_array_almost_equal(\n marg_effect_inf.conf_int()[0], marg_eff_int[0], decimal=5)\n marg_effect_inf.population_summary()._repr_html_()\n\n # test coef__inference and intercept__inference\n if ((not isinstance(est, KernelDML)) and\n (not isinstance(est, CausalForestDML))):\n if X is not None:\n self.assertEqual(\n 
shape(est.coef__inference().summary_frame()),\n coef_summaryframe_shape)\n np.testing.assert_array_almost_equal(\n est.coef__inference().conf_int()\n [0], est.coef__interval()[0], decimal=5)\n\n if fit_cate_intercept:\n cm = ExitStack()\n # ExitStack can be used as a \"do nothing\" ContextManager\n else:\n cm = pytest.raises(AttributeError)\n with cm:\n self.assertEqual(shape(est.intercept__inference().\n summary_frame()),\n intercept_summaryframe_shape)\n np.testing.assert_array_almost_equal(\n est.intercept__inference().conf_int()\n [0], est.intercept__interval()[0], decimal=5)\n\n est.summary()\n\n est.score(Y, T, X, W)\n\n if isinstance(est, CausalForestDML):\n np.testing.assert_array_equal(est.feature_importances_.shape,\n ((d_y,) if d_y > 0 else()) + fd_x)\n\n # make sure we can call effect with implied scalar treatments,\n # no matter the dimensions of T, and also that we warn when there\n # are multiple treatments\n if d_t > 1:\n cm = self.assertWarns(Warning)\n else:\n # ExitStack can be used as a \"do nothing\" ContextManager\n cm = ExitStack()\n with cm:\n effect_shape2 = (n if d_x else 1,) + ((d_y,) if d_y > 0 else())\n eff = est.effect(X) if not is_discrete else est.effect(\n X, T0='a', T1='b')\n self.assertEqual(shape(eff), effect_shape2)\n\n def test_cate_api_nonparam(self):\n \"\"\"Test that we correctly implement the CATE API.\"\"\"\n n = 20\n\n def make_random(is_discrete, d):\n if d is None:\n return None\n sz = (n, d) if d >= 0 else (n,)\n if is_discrete:\n while True:\n arr = np.random.choice(['a', 'b'], size=sz)\n # ensure that we've got at least two of every element\n _, counts = np.unique(arr, return_counts=True)\n if len(counts) == 2 and counts.min() > 2:\n return arr\n else:\n return np.random.normal(size=sz)\n\n for d_t in [1, -1]:\n for is_discrete in [True, False] if d_t <= 1 else [False]:\n for d_y in [3, 1, -1]:\n for d_x in [2, None]:\n for d_w in [2, None]:\n W, X, Y, T = [make_random(is_discrete, d)\n for is_discrete, d in [(False, d_w),\n (False, d_x),\n (False, d_y),\n (is_discrete, d_t)]]\n\n d_t_final = 1 if is_discrete else d_t\n\n effect_shape = (n,) + ((d_y,) if d_y > 0 else ())\n effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1), 6)\n marginal_effect_shape = ((n,) +\n ((d_y,) if d_y > 0 else ()) +\n ((d_t_final,) if d_t_final > 0 else ()))\n marginal_effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1) *\n (d_t_final if d_t_final > 0 else 1), 6)\n # since T isn't passed to const_marginal_effect, defaults to one row if X is None\n const_marginal_effect_shape = ((n if d_x else 1,) +\n ((d_y,) if d_y > 0 else ()) +\n ((d_t_final,) if d_t_final > 0 else()))\n const_marginal_effect_summaryframe_shape = (\n (n if d_x else 1) * (d_y if d_y > 0 else 1) * (d_t_final if d_t_final > 0 else 1), 6)\n\n model_t = LogisticRegression() if is_discrete else WeightedLasso()\n\n base_infs = [None, BootstrapInference(2)]\n for est, multi, infs in [(NonParamDML(model_y=WeightedLasso(),\n model_t=model_t,\n model_final=WeightedLasso(),\n featurizer=None,\n discrete_treatment=is_discrete),\n True,\n base_infs),\n (NonParamDML(model_y=WeightedLasso(),\n model_t=model_t,\n model_final=WeightedLasso(),\n featurizer=FunctionTransformer(),\n discrete_treatment=is_discrete),\n True,\n base_infs), ]:\n\n if not(multi) and d_y > 1:\n continue\n\n for inf in infs:\n with self.subTest(d_w=d_w, d_x=d_x, d_y=d_y, d_t=d_t,\n is_discrete=is_discrete, est=est, inf=inf):\n if X is None:\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X, W=W, inference=inf)\n 
continue\n\n est.fit(Y, T, X=X, W=W, inference=inf)\n # make sure we can call the marginal_effect and effect methods\n const_marg_eff = est.const_marginal_effect(X)\n marg_eff = est.marginal_effect(T, X)\n self.assertEqual(shape(marg_eff), marginal_effect_shape)\n self.assertEqual(shape(const_marg_eff), const_marginal_effect_shape)\n\n np.testing.assert_array_equal(\n marg_eff if d_x else marg_eff[0:1], const_marg_eff)\n\n T0 = np.full_like(T, 'a') if is_discrete else np.zeros_like(T)\n eff = est.effect(X, T0=T0, T1=T)\n self.assertEqual(shape(eff), effect_shape)\n\n if inf is not None:\n const_marg_eff_int = est.const_marginal_effect_interval(X)\n marg_eff_int = est.marginal_effect_interval(T, X)\n self.assertEqual(shape(marg_eff_int),\n (2,) + marginal_effect_shape)\n self.assertEqual(shape(const_marg_eff_int),\n (2,) + const_marginal_effect_shape)\n self.assertEqual(shape(est.effect_interval(X, T0=T0, T1=T)),\n (2,) + effect_shape)\n if inf in ['auto', 'statsmodels', 'debiasedlasso', 'blb']:\n const_marg_effect_inf = est.const_marginal_effect_inference(X)\n T1 = np.full_like(T, 'b') if is_discrete else T\n effect_inf = est.effect_inference(X, T0=T0, T1=T1)\n marg_effect_inf = est.marginal_effect_inference(T, X)\n # test const marginal inference\n self.assertEqual(shape(const_marg_effect_inf.summary_frame()),\n const_marginal_effect_summaryframe_shape)\n self.assertEqual(shape(const_marg_effect_inf.point_estimate),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.stderr),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.var),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.pvalue()),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.zstat()),\n const_marginal_effect_shape)\n self.assertEqual(shape(const_marg_effect_inf.conf_int()),\n (2,) + const_marginal_effect_shape)\n np.testing.assert_array_almost_equal(\n const_marg_effect_inf.conf_int()[0],\n const_marg_eff_int[0], decimal=5)\n const_marg_effect_inf.population_summary()._repr_html_()\n\n # test effect inference\n self.assertEqual(shape(effect_inf.summary_frame()),\n effect_summaryframe_shape)\n self.assertEqual(shape(effect_inf.point_estimate),\n effect_shape)\n self.assertEqual(shape(effect_inf.stderr),\n effect_shape)\n self.assertEqual(shape(effect_inf.var),\n effect_shape)\n self.assertEqual(shape(effect_inf.pvalue()),\n effect_shape)\n self.assertEqual(shape(effect_inf.zstat()),\n effect_shape)\n self.assertEqual(shape(effect_inf.conf_int()),\n (2,) + effect_shape)\n np.testing.assert_array_almost_equal(\n effect_inf.conf_int()[0],\n est.effect_interval(X, T0=T0, T1=T1)[0], decimal=5)\n effect_inf.population_summary()._repr_html_()\n\n # test marginal effect inference\n self.assertEqual(shape(marg_effect_inf.summary_frame()),\n marginal_effect_summaryframe_shape)\n self.assertEqual(shape(marg_effect_inf.point_estimate),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.stderr),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.var),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.pvalue()),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.zstat()),\n marginal_effect_shape)\n self.assertEqual(shape(marg_effect_inf.conf_int()),\n (2,) + marginal_effect_shape)\n np.testing.assert_array_almost_equal(\n marg_effect_inf.conf_int()[0], marg_eff_int[0], decimal=5)\n marg_effect_inf.population_summary()._repr_html_()\n\n est.score(Y, T, X, W)\n\n # make 
sure we can call effect with implied scalar treatments, no matter the\n # dimensions of T, and also that we warn when there are multiple treatments\n if d_t > 1:\n cm = self.assertWarns(Warning)\n else:\n cm = ExitStack() # ExitStack can be used as a \"do nothing\" ContextManager\n with cm:\n effect_shape2 = (n if d_x else 1,) + ((d_y,) if d_y > 0 else())\n eff = est.effect(X) if not is_discrete else est.effect(X, T0='a', T1='b')\n self.assertEqual(shape(eff), effect_shape2)\n\n def test_bad_splits_discrete(self):\n \"\"\"\n Tests that when some training splits in a crossfit fold don't contain all treatments then an error\n is raised.\n \"\"\"\n Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])\n T = np.array([2, 2, 1, 2, 1, 1, 1, 1])\n X = np.ones((8, 1))\n est = LinearDML(cv=[(np.arange(4, 8), np.arange(4))], discrete_treatment=True)\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X)\n Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])\n T = np.array([2, 2, 1, 2, 2, 2, 2, 2])\n X = np.ones((8, 1))\n est = LinearDML(cv=[(np.arange(4, 8), np.arange(4))], discrete_treatment=True)\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X)\n\n def test_bad_treatment_nonparam(self):\n \"\"\"\n Test that the non-parametric dml raises errors when treatment is not binary or single dimensional\n \"\"\"\n Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])\n T = np.array([3, 2, 1, 2, 1, 2, 1, 3])\n X = np.ones((8, 1))\n est = NonParamDML(model_y=WeightedLasso(),\n model_t=LogisticRegression(),\n model_final=WeightedLasso(),\n discrete_treatment=True)\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X)\n T = np.ones((8, 2))\n est = NonParamDML(model_y=WeightedLasso(),\n model_t=LinearRegression(),\n model_final=WeightedLasso(),\n discrete_treatment=False)\n with pytest.raises(AttributeError):\n est.fit(Y, T, X=X)\n\n def test_access_to_internal_models(self):\n \"\"\"\n Test that API related to accessing the nuisance models, cate_model and featurizer is working.\n \"\"\"\n Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])\n T = np.array([3, 2, 1, 2, 1, 2, 1, 3])\n X = np.ones((8, 1))\n est = DML(model_y=WeightedLasso(),\n model_t=LogisticRegression(),\n model_final=WeightedLasso(),\n featurizer=PolynomialFeatures(degree=2, include_bias=False),\n fit_cate_intercept=True,\n discrete_treatment=True)\n est.fit(Y, T, X=X)\n assert isinstance(est.original_featurizer, PolynomialFeatures)\n assert isinstance(est.featurizer_, Pipeline)\n assert isinstance(est.model_cate, WeightedLasso)\n for mdl in est.models_y:\n assert isinstance(mdl, WeightedLasso)\n for mdl in est.models_t:\n assert isinstance(mdl, LogisticRegression)\n np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A', 'A^2'])\n np.testing.assert_array_equal(est.cate_feature_names(), ['X0', 'X0^2'])\n est = DML(model_y=WeightedLasso(),\n model_t=LogisticRegression(),\n model_final=WeightedLasso(),\n featurizer=None,\n fit_cate_intercept=True,\n discrete_treatment=True)\n est.fit(Y, T, X=X)\n assert est.original_featurizer is None\n assert isinstance(est.featurizer_, FunctionTransformer)\n assert isinstance(est.model_cate, WeightedLasso)\n for mdl in est.models_y:\n assert isinstance(mdl, WeightedLasso)\n for mdl in est.models_t:\n assert isinstance(mdl, LogisticRegression)\n np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A'])\n\n def test_forest_dml_perf(self):\n \"\"\"Testing accuracy of forest DML is reasonable\"\"\"\n np.random.seed(1234)\n n = 20000 # number of raw samples\n d = 10\n for _ in range(2):\n X = np.random.binomial(1, .5, size=(n, d))\n T 
= np.random.binomial(1, .5, size=(n,))\n\n def true_fn(x):\n return -1 + 2 * x[:, 0] + x[:, 1] * x[:, 2]\n y = true_fn(X) * T + X[:, 0] + (1 * X[:, 0] + 1) * np.random.normal(0, 1, size=(n,))\n\n XT = np.hstack([T.reshape(-1, 1), X])\n X1, X2, y1, y2, X1_sum, X2_sum, y1_sum, y2_sum, n1_sum, n2_sum, var1_sum, var2_sum = _summarize(XT, y)\n # We concatenate the two copies data\n X_sum = np.vstack([np.array(X1_sum)[:, 1:], np.array(X2_sum)[:, 1:]])\n T_sum = np.concatenate((np.array(X1_sum)[:, 0], np.array(X2_sum)[:, 0]))\n y_sum = np.concatenate((y1_sum, y2_sum)) # outcome\n n_sum = np.concatenate((n1_sum, n2_sum)) # number of summarized points\n var_sum = np.concatenate((var1_sum, var2_sum)) # variance of the summarized points\n for summarized, min_samples_leaf in [(False, 20), (True, 1)]:\n est = CausalForestDML(model_y=GradientBoostingRegressor(n_estimators=30, min_samples_leaf=30),\n model_t=GradientBoostingClassifier(n_estimators=30, min_samples_leaf=30),\n discrete_treatment=True,\n cv=2,\n n_estimators=1000,\n max_samples=.4,\n min_samples_leaf=min_samples_leaf,\n min_impurity_decrease=0.001,\n verbose=0, min_var_fraction_leaf=.1,\n fit_intercept=False,\n random_state=12345)\n if summarized:\n est.fit(y_sum, T_sum, X=X_sum[:, :4], W=X_sum[:, 4:],\n sample_weight=n_sum)\n else:\n est.fit(y, T, X=X[:, :4], W=X[:, 4:])\n X_test = np.array(list(itertools.product([0, 1], repeat=4)))\n point = est.effect(X_test)\n truth = true_fn(X_test)\n lb, ub = est.effect_interval(X_test, alpha=.01)\n np.testing.assert_allclose(point, truth, rtol=0, atol=.3)\n np.testing.assert_array_less(lb - .01, truth)\n np.testing.assert_array_less(truth, ub + .01)\n\n est = CausalForestDML(model_y=GradientBoostingRegressor(n_estimators=50, min_samples_leaf=100),\n model_t=GradientBoostingRegressor(n_estimators=50, min_samples_leaf=100),\n discrete_treatment=False,\n cv=2,\n n_estimators=1000,\n max_samples=.4,\n min_samples_leaf=min_samples_leaf,\n min_impurity_decrease=0.001,\n verbose=0, min_var_fraction_leaf=.1,\n fit_intercept=False,\n random_state=12345)\n if summarized:\n est.fit(y_sum, T_sum, X=X_sum[:, :4], W=X_sum[:, 4:],\n sample_weight=n_sum)\n else:\n est.fit(y, T, X=X[:, :4], W=X[:, 4:])\n X_test = np.array(list(itertools.product([0, 1], repeat=4)))\n point = est.effect(X_test)\n truth = true_fn(X_test)\n lb, ub = est.effect_interval(X_test, alpha=.01)\n np.testing.assert_allclose(point, truth, rtol=0, atol=.3)\n np.testing.assert_array_less(lb - .01, truth)\n np.testing.assert_array_less(truth, ub + .01)\n\n def test_can_use_vectors(self):\n \"\"\"Test that we can pass vectors for T and Y (not only 2-dimensional arrays).\"\"\"\n dmls = [\n LinearDML(model_y=LinearRegression(), model_t=LinearRegression(), fit_cate_intercept=False),\n SparseLinearDML(model_y=LinearRegression(), model_t=LinearRegression(), fit_cate_intercept=False)\n ]\n for dml in dmls:\n dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))\n self.assertAlmostEqual(dml.coef_.reshape(())[()], 1)\n score = dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))\n self.assertAlmostEqual(score, 0)\n\n def test_can_use_sample_weights(self):\n \"\"\"Test that we can pass sample weights to an estimator.\"\"\"\n dmls = [\n LinearDML(model_y=LinearRegression(), model_t='auto', fit_cate_intercept=False),\n SparseLinearDML(model_y=LinearRegression(), model_t='auto', fit_cate_intercept=False)\n ]\n for dml in dmls:\n dml.fit(np.array([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]), np.array([1, 
2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]),\n X=np.ones((12, 1)), sample_weight=np.ones((12, )))\n self.assertAlmostEqual(dml.coef_.reshape(())[()], 1)\n\n def test_discrete_treatments(self):\n \"\"\"Test that we can use discrete treatments\"\"\"\n dmls = [\n LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n fit_cate_intercept=False, discrete_treatment=True),\n SparseLinearDML(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n fit_cate_intercept=False, discrete_treatment=True)\n ]\n for dml in dmls:\n # create a simple artificial setup where effect of moving from treatment\n # 1 -> 2 is 2,\n # 1 -> 3 is 1, and\n # 2 -> 3 is -1 (necessarily, by composing the previous two effects)\n # Using an uneven number of examples from different classes,\n # and having the treatments in non-lexicographic order,\n # Should rule out some basic issues.\n dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array([3, 2, 1, 2, 3, 1, 1, 1]), X=np.ones((8, 1)))\n np.testing.assert_almost_equal(\n dml.effect(\n np.ones((9, 1)),\n T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])\n ),\n [0, 2, 1, -2, 0, -1, -1, 1, 0],\n decimal=2)\n dml.score(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array([3, 2, 1, 2, 3, 1, 1, 1]), np.ones((8, 1)))\n\n def test_can_custom_splitter(self):\n # test that we can fit with a KFold instance\n dml = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n discrete_treatment=True, cv=KFold())\n dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))\n dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))\n\n # test that we can fit with a train/test iterable\n dml = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n discrete_treatment=True, cv=[([0, 1, 2], [3, 4, 5])])\n dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))\n dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))\n\n def test_can_use_featurizer(self):\n \"Test that we can use a featurizer, and that fit is only called during training\"\n\n # predetermined splits ensure that all features are seen in each split\n splits = ([0, 2, 3, 6, 8, 11, 13, 15, 16],\n [1, 4, 5, 7, 9, 10, 12, 14, 17])\n\n dml = LinearDML(model_y=LinearRegression(), model_t=LinearRegression(),\n fit_cate_intercept=False, featurizer=OneHotEncoder(sparse=False),\n cv=[splits, splits[::-1]])\n\n T = np.tile([1, 2, 3], 6)\n Y = np.array([1, 2, 3, 1, 2, 3])\n Y = np.concatenate([Y, 0 * Y, -Y])\n X = np.repeat([[7, 8, 9]], 6, axis=1).T\n\n dml.fit(Y, T, X=X)\n\n # because there is one fewer unique element in the test set, fit_transform would return the wrong number of fts\n X_test = np.array([[7, 8]]).T\n\n np.testing.assert_equal(dml.effect(X_test)[::-1], dml.effect(X_test[::-1]))\n eff_int = np.array(dml.effect_interval(X_test))\n eff_int_rev = np.array(dml.effect_interval(X_test[::-1]))\n np.testing.assert_equal(eff_int[:, ::-1], eff_int_rev)\n\n eff_int = np.array(dml.const_marginal_effect_interval(X_test))\n eff_int_rev = np.array(dml.const_marginal_effect_interval(X_test[::-1]))\n np.testing.assert_equal(eff_int[:, ::-1], eff_int_rev)\n\n def test_can_use_statsmodel_inference(self):\n \"\"\"Test that we can use statsmodels to generate confidence intervals\"\"\"\n dml = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n discrete_treatment=True)\n dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array(\n 
[3, 2, 1, 2, 3, 1, 1, 1]), X=np.ones((8, 1)))\n interval = dml.effect_interval(np.ones((9, 1)),\n T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]),\n alpha=0.05)\n point = dml.effect(np.ones((9, 1)),\n T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]))\n assert len(interval) == 2\n lo, hi = interval\n assert lo.shape == hi.shape == point.shape\n assert (lo <= point).all()\n assert (point <= hi).all()\n assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width\n\n interval = dml.const_marginal_effect_interval(np.ones((9, 1)), alpha=0.05)\n point = dml.const_marginal_effect(np.ones((9, 1)))\n assert len(interval) == 2\n lo, hi = interval\n assert lo.shape == hi.shape == point.shape\n assert (lo <= point).all()\n assert (point <= hi).all()\n assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width\n\n interval = dml.coef__interval(alpha=0.05)\n point = dml.coef_\n assert len(interval) == 2\n lo, hi = interval\n assert lo.shape == hi.shape == point.shape\n assert (lo <= point).all()\n assert (point <= hi).all()\n assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width\n\n interval = dml.intercept__interval(alpha=0.05)\n point = dml.intercept_\n assert len(interval) == 2\n lo, hi = interval\n assert (lo <= point).all()\n assert (point <= hi).all()\n assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width\n\n def test_ignores_final_intercept(self):\n \"\"\"Test that final model intercepts are ignored (with a warning)\"\"\"\n class InterceptModel:\n def fit(self, Y, X):\n pass\n\n def predict(self, X):\n return X + 1\n\n def prediction_stderr(self, X):\n return np.zeros(X.shape[0])\n\n # (incorrectly) use a final model with an intercept\n dml = DML(model_y=LinearRegression(), model_t=LinearRegression(),\n model_final=InterceptModel())\n # Because final model is fixed, actual values of T and Y don't matter\n t = np.random.normal(size=100)\n y = np.random.normal(size=100)\n with self.assertWarns(Warning): # we should warn whenever there's an intercept\n dml.fit(y, t)\n assert dml.const_marginal_effect() == 1 # coefficient on X in InterceptModel is 1\n assert dml.const_marginal_effect_inference().point_estimate == 1\n assert dml.const_marginal_effect_inference().conf_int() == (1, 1)\n assert dml.const_marginal_effect_interval() == (1, 1)\n assert dml.effect() == 1\n assert dml.effect_inference().point_estimate == 1\n assert dml.effect_inference().conf_int() == (1, 1)\n assert dml.effect_interval() == (1, 1)\n assert dml.marginal_effect(1) == 1 # coefficient on X in InterceptModel is 1\n assert dml.marginal_effect_inference(1).point_estimate == 1\n assert dml.marginal_effect_inference(1).conf_int() == (1, 1)\n assert dml.marginal_effect_interval(1) == (1, 1)\n\n def test_sparse(self):\n for _ in range(5):\n # Ensure reproducibility\n np.random.seed(1234)\n n_p = np.random.randint(2, 5) # 2 to 4 products\n d_w = np.random.randint(0, 5) # random number of covariates\n min_n = np.ceil(2 + d_w * (1 + (d_w + 1) / n_p)) # minimum number of rows per product\n n_r = np.random.randint(min_n, min_n + 3)\n with self.subTest(n_p=n_p, d_w=d_w, n_r=n_r):\n TestDML._test_sparse(n_p, d_w, n_r)\n\n def test_linear_sparse(self):\n \"\"\"SparseDML test with a sparse DGP\"\"\"\n # Sparse DGP\n np.random.seed(123)\n n_x = 50\n n_nonzero = 5\n n_w = 5\n n = 1000\n # Treatment effect coef\n a = np.zeros(n_x)\n 
nonzero_idx = np.random.choice(n_x, size=n_nonzero, replace=False)\n a[nonzero_idx] = 1\n # Other coefs\n b = np.zeros(n_x + n_w)\n g = np.zeros(n_x + n_w)\n b_nonzero = np.random.choice(n_x + n_w, size=n_nonzero, replace=False)\n g_nonzero = np.random.choice(n_x + n_w, size=n_nonzero, replace=False)\n b[b_nonzero] = 1\n g[g_nonzero] = 1\n # Features and controls\n x = np.random.normal(size=(n, n_x))\n w = np.random.normal(size=(n, n_w))\n xw = np.hstack([x, w])\n err_T = np.random.normal(size=n)\n T = xw @ b + err_T\n err_Y = np.random.normal(size=n, scale=0.5)\n Y = T * (x @ a) + xw @ g + err_Y\n # Test sparse estimator\n # --> test coef_, intercept_\n sparse_dml = SparseLinearDML(fit_cate_intercept=False)\n sparse_dml.fit(Y, T, X=x, W=w)\n np.testing.assert_allclose(a, sparse_dml.coef_, atol=2e-1)\n with pytest.raises(AttributeError):\n sparse_dml.intercept_\n # --> test treatment effects\n # Restrict x_test to vectors of norm < 1\n x_test = np.random.uniform(size=(10, n_x))\n true_eff = (x_test @ a)\n eff = sparse_dml.effect(x_test, T0=0, T1=1)\n np.testing.assert_allclose(true_eff, eff, atol=0.5)\n # --> check inference\n y_lower, y_upper = sparse_dml.effect_interval(x_test, T0=0, T1=1)\n in_CI = ((y_lower < true_eff) & (true_eff < y_upper))\n # Check that a majority of true effects lie in the 5-95% CI\n self.assertTrue(in_CI.mean() > 0.8)\n\n @staticmethod\n def _generate_recoverable_errors(a_X, X, a_W=None, W=None, featurizer=None):\n \"\"\"Return error vectors e_t and e_y such that OLS can recover the true coefficients from both stages.\"\"\"\n if W is None:\n W = np.empty((shape(X)[0], 0))\n if a_W is None:\n a_W = np.zeros((shape(W)[1],))\n # to correctly recover coefficients for T via OLS, we need e_t to be orthogonal to [W;X]\n WX = hstack([W, X])\n e_t = rand_sol(WX.T, np.zeros((shape(WX)[1],)))\n\n # to correctly recover coefficients for Y via OLS, we need ([X; W]⊗[1; ϕ(X); W])⁺ e_y =\n # -([X; W]⊗[1; ϕ(X); W])⁺ ((ϕ(X)⊗e_t)a_X+(W⊗e_t)a_W)\n # then, to correctly recover a in the third stage, we additionally need (ϕ(X)⊗e_t)ᵀ e_y = 0\n\n ϕ = featurizer.fit_transform(X) if featurizer is not None else X\n\n v_X = cross_product(ϕ, e_t)\n v_W = cross_product(W, e_t)\n\n M = np.linalg.pinv(cross_product(WX, hstack([np.ones((shape(WX)[0], 1)), ϕ, W])))\n e_y = rand_sol(vstack([M, v_X.T]), vstack([-M @ (v_X @ a_X + v_W @ a_W), np.zeros((shape(v_X)[1],))]))\n\n return e_t, e_y\n\n # sparse test case: heterogeneous effect by product\n @staticmethod\n def _test_sparse(n_p, d_w, n_r):\n # need at least as many rows in e_y as there are distinct columns\n # in [X;X⊗W;W⊗W;X⊗e_t] to find a solution for e_t\n assert n_p * n_r >= 2 * n_p + n_p * d_w + d_w * (d_w + 1) / 2\n a = np.random.normal(size=(n_p,)) # one effect per product\n n = n_p * n_r * 100\n p = np.tile(range(n_p), n_r * 100) # product id\n\n b = np.random.normal(size=(d_w + n_p,))\n g = np.random.normal(size=(d_w + n_p,))\n\n x = np.empty((2 * n, n_p)) # product dummies\n w = np.empty((2 * n, d_w))\n y = np.empty(2 * n)\n t = np.empty(2 * n)\n\n for fold in range(0, 2):\n x_f = OneHotEncoder().fit_transform(np.reshape(p, (-1, 1))).toarray()\n w_f = np.random.normal(size=(n, d_w))\n xw_f = hstack([x_f, w_f])\n e_t_f, e_y_f = TestDML._generate_recoverable_errors(a, x_f, W=w_f)\n\n t_f = xw_f @ b + e_t_f\n y_f = t_f * np.choose(p, a) + xw_f @ g + e_y_f\n\n x[fold * n:(fold + 1) * n, :] = x_f\n w[fold * n:(fold + 1) * n, :] = w_f\n y[fold * n:(fold + 1) * n] = y_f\n t[fold * n:(fold + 1) * n] = t_f\n\n dml = 
SparseLinearDML(model_y=LinearRegression(fit_intercept=False),\n model_t=LinearRegression(fit_intercept=False),\n fit_cate_intercept=False)\n dml.fit(y, t, X=x, W=w)\n\n np.testing.assert_allclose(a, dml.coef_.reshape(-1), atol=1e-1)\n eff = reshape(t * np.choose(np.tile(p, 2), a), (-1,))\n np.testing.assert_allclose(eff, dml.effect(x, T0=0, T1=t), atol=1e-1)\n\n def test_nuisance_scores(self):\n X = np.random.choice(np.arange(5), size=(100, 3))\n y = np.random.normal(size=(100,))\n T = T0 = T1 = np.random.choice(np.arange(3), size=(100, 2))\n W = np.random.normal(size=(100, 2))\n for cv in [1, 2, 3]:\n est = LinearDML(cv=cv)\n est.fit(y, T, X=X, W=W)\n assert len(est.nuisance_scores_t) == len(est.nuisance_scores_y) == cv\n\n def test_categories(self):\n dmls = [LinearDML, SparseLinearDML]\n for ctor in dmls:\n dml1 = ctor(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n fit_cate_intercept=False, discrete_treatment=True, random_state=123)\n dml2 = ctor(model_y=LinearRegression(), model_t=LogisticRegression(C=1000),\n fit_cate_intercept=False, discrete_treatment=True, categories=['c', 'b', 'a'],\n random_state=123)\n\n # create a simple artificial setup where effect of moving from treatment\n # a -> b is 2,\n # a -> c is 1, and\n # b -> c is -1 (necessarily, by composing the previous two effects)\n # Using an uneven number of examples from different classes,\n # and having the treatments in non-lexicographic order,\n # should rule out some basic issues.\n\n # Note that explicitly specifying the dtype as object is necessary until\n # there's a fix for https://github.com/scikit-learn/scikit-learn/issues/15616\n\n for dml in [dml1, dml2]:\n dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]),\n np.array(['c', 'b', 'a', 'b', 'c', 'a', 'a', 'a'], dtype='object'), X=np.ones((8, 1)))\n\n # estimated effects should be identical when treatment is explicitly given\n np.testing.assert_almost_equal(\n dml1.effect(\n np.ones((9, 1)),\n T0=np.array(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], dtype='object'),\n T1=np.array(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'], dtype='object')\n ),\n dml2.effect(\n np.ones((9, 1)),\n T0=np.array(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], dtype='object'),\n T1=np.array(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'], dtype='object')\n ),\n decimal=4)\n\n # but const_marginal_effect should be reordered based on the explicit cagetories\n cme1 = dml1.const_marginal_effect(np.ones((1, 1))).reshape(-1)\n cme2 = dml2.const_marginal_effect(np.ones((1, 1))).reshape(-1)\n self.assertAlmostEqual(cme1[1], -cme2[1], places=3) # 1->3 in original ordering; 3->1 in new ordering\n # 1-> 2 in original ordering; combination of 3->1 and 3->2\n self.assertAlmostEqual(cme1[0], -cme2[1] + cme2[0], places=3)\n\n def test_groups(self):\n groups = [1, 2, 3, 4, 5, 6] * 10\n t = groups\n y = groups\n est = LinearDML()\n with pytest.raises(Exception): # can't pass groups without a compatible n_split\n est.fit(y, t, groups=groups)\n\n # test outer grouping\n est = LinearDML(model_y=LinearRegression(), model_t=LinearRegression(), cv=GroupKFold(2))\n est.fit(y, t, groups=groups)\n\n # test nested grouping\n class NestedModel(LassoCV):\n def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,\n precompute='auto', max_iter=1000, tol=1e-4, normalize=False,\n copy_X=True, cv=None, verbose=False, n_jobs=None,\n positive=False, random_state=None, selection='cyclic'):\n\n super().__init__(\n eps=eps, n_alphas=n_alphas, alphas=alphas,\n fit_intercept=fit_intercept, 
normalize=normalize,\n precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,\n cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,\n random_state=random_state, selection=selection)\n\n def fit(self, X, y):\n # ensure that the grouping has worked correctly and we get all 10 copies of the items in\n # whichever groups we saw\n (yvals, cts) = np.unique(y, return_counts=True)\n for (yval, ct) in zip(yvals, cts):\n if ct != 10:\n raise Exception(\"Grouping failed; received {0} copies of {1} instead of 10\".format(ct, yval))\n return super().fit(X, y)\n\n # test nested grouping\n est = LinearDML(model_y=NestedModel(cv=2), model_t=NestedModel(cv=2), cv=GroupKFold(2))\n est.fit(y, t, groups=groups)\n\n # by default, we use 5 split cross-validation for our T and Y models\n # but we don't have enough groups here to split both the outer and inner samples with grouping\n # TODO: does this imply we should change some defaults to make this more likely to succeed?\n est = LinearDML(cv=GroupKFold(2))\n with pytest.raises(Exception):\n est.fit(y, t, groups=groups)\n" ]
[ [ "numpy.linalg.matrix_rank", "sklearn.preprocessing.PolynomialFeatures", "sklearn.model_selection.KFold", "numpy.concatenate", "numpy.zeros_like", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.hstack", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.eye", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.linear_model.Lasso", "numpy.ceil", "numpy.choose", "numpy.repeat", "numpy.zeros", "numpy.random.choice", "numpy.full_like", "numpy.testing.assert_allclose", "numpy.random.binomial", "sklearn.ensemble.GradientBoostingClassifier", "numpy.array", "sklearn.model_selection.GroupKFold", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "sklearn.preprocessing.FunctionTransformer", "sklearn.preprocessing.OneHotEncoder", "numpy.tile", "numpy.ones", "numpy.testing.assert_array_less", "numpy.linalg.pinv", "numpy.random.normal", "numpy.testing.assert_array_equal", "sklearn.linear_model.LinearRegression", "numpy.random.uniform", "numpy.empty" ] ]
jererobles/openpilot
[ "d3e03bed5733425a52bbfe432b00a7da690f5596" ]
[ "pyextra/acados_template/utils.py" ]
[ "#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport os, sys, json\nimport urllib.request\nimport shutil\nimport numpy as np\nfrom casadi import SX, MX, DM, Function, CasadiMeta\n\nALLOWED_CASADI_VERSIONS = ('3.5.5', '3.5.4', '3.5.3', '3.5.2', '3.5.1', '3.4.5', '3.4.0')\n\nTERA_VERSION = \"0.0.34\"\n\ndef get_acados_path():\n ACADOS_PATH = os.environ.get('ACADOS_SOURCE_DIR')\n if not ACADOS_PATH:\n acados_template_path = os.path.dirname(os.path.abspath(__file__))\n acados_path = os.path.join(acados_template_path, '../../../')\n ACADOS_PATH = os.path.realpath(acados_path)\n msg = 'Warning: Did not find environment variable ACADOS_SOURCE_DIR, '\n msg += 'guessed ACADOS_PATH to be {}.\\n'.format(ACADOS_PATH)\n msg += 'Please export ACADOS_SOURCE_DIR to avoid this warning.'\n print(msg)\n return ACADOS_PATH\n\n\ndef get_tera_exec_path():\n TERA_PATH = os.environ.get('TERA_PATH')\n if not TERA_PATH:\n TERA_PATH = os.path.join(get_acados_path(), 'bin/t_renderer')\n return TERA_PATH\n\n\nplatform2tera = {\n \"linux\": \"linux\",\n \"darwin\": \"osx\",\n \"win32\": \"window.exe\"\n}\n\n\ndef casadi_version_warning(casadi_version):\n msg = 'Warning: Please note that the following versions of CasADi are '\n msg += 'officially supported: {}.\\n '.format(\" or \".join(ALLOWED_CASADI_VERSIONS))\n msg += 'If there is an incompatibility with the CasADi generated code, '\n msg += 'please consider changing your CasADi version.\\n'\n msg += 'Version {} currently in use.'.format(casadi_version)\n print(msg)\n\n\ndef is_column(x):\n if isinstance(x, np.ndarray):\n if x.ndim == 1:\n return True\n elif x.ndim == 2 and x.shape[1] == 1:\n return True\n else:\n return False\n elif isinstance(x, (MX, SX, DM)):\n if x.shape[1] == 1:\n return True\n elif x.shape[0] == 0 and x.shape[1] == 0:\n return True\n else:\n return False\n elif x == None or x == []:\n return False\n else:\n raise Exception(\"is_column expects one of 
the following types: np.ndarray, casadi.MX, casadi.SX.\"\n + \" Got: \" + str(type(x)))\n\n\ndef is_empty(x):\n if isinstance(x, (MX, SX, DM)):\n return x.is_empty()\n elif isinstance(x, np.ndarray):\n if np.prod(x.shape) == 0:\n return True\n else:\n return False\n elif x == None or x == []:\n return True\n else:\n raise Exception(\"is_empty expects one of the following types: casadi.MX, casadi.SX, \"\n + \"None, numpy array empty list. Got: \" + str(type(x)))\n\n\ndef casadi_length(x):\n if isinstance(x, (MX, SX, DM)):\n return int(np.prod(x.shape))\n else:\n raise Exception(\"casadi_length expects one of the following types: casadi.MX, casadi.SX.\"\n + \" Got: \" + str(type(x)))\n\n\ndef make_model_consistent(model):\n x = model.x\n xdot = model.xdot\n u = model.u\n z = model.z\n p = model.p\n\n if isinstance(x, MX):\n symbol = MX.sym\n elif isinstance(x, SX):\n symbol = SX.sym\n else:\n raise Exception(\"model.x must be casadi.SX or casadi.MX, got {}\".format(type(x)))\n\n if is_empty(p):\n model.p = symbol('p', 0, 0)\n\n if is_empty(z):\n model.z = symbol('z', 0, 0)\n\n return model\n\n\ndef get_tera():\n tera_path = get_tera_exec_path()\n acados_path = get_acados_path()\n\n if os.path.exists(tera_path) and os.access(tera_path, os.X_OK):\n return tera_path\n\n repo_url = \"https://github.com/acados/tera_renderer/releases\"\n url = \"{}/download/v{}/t_renderer-v{}-{}\".format(\n repo_url, TERA_VERSION, TERA_VERSION, platform2tera[sys.platform])\n\n manual_install = 'For manual installation follow these instructions:\\n'\n manual_install += '1 Download binaries from {}\\n'.format(url)\n manual_install += '2 Copy them in {}/bin\\n'.format(acados_path)\n manual_install += '3 Strip the version and platform from the binaries: '\n manual_install += 'as t_renderer-v0.0.34-X -> t_renderer)\\n'\n manual_install += '4 Enable execution privilege on the file \"t_renderer\" with:\\n'\n manual_install += '\"chmod +x {}\"\\n\\n'.format(tera_path)\n\n msg = \"\\n\"\n msg += 'Tera template render executable not found, '\n msg += 'while looking in path:\\n{}\\n'.format(tera_path)\n msg += 'In order to be able to render the templates, '\n msg += 'you need to download the tera renderer binaries from:\\n'\n msg += '{}\\n\\n'.format(repo_url)\n msg += 'Do you wish to set up Tera renderer automatically?\\n'\n msg += 'y/N? 
(press y to download tera or any key for manual installation)\\n'\n\n if input(msg) == 'y':\n print(\"Dowloading {}\".format(url))\n with urllib.request.urlopen(url) as response, open(tera_path, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n print(\"Successfully downloaded t_renderer.\")\n os.chmod(tera_path, 0o755)\n return tera_path\n\n msg_cancel = \"\\nYou cancelled automatic download.\\n\\n\"\n msg_cancel += manual_install\n msg_cancel += \"Once installed re-run your script.\\n\\n\"\n print(msg_cancel)\n\n sys.exit(1)\n\n\ndef render_template(in_file, out_file, template_dir, json_path):\n cwd = os.getcwd()\n if not os.path.exists(template_dir):\n os.mkdir(template_dir)\n os.chdir(template_dir)\n\n tera_path = get_tera()\n\n # setting up loader and environment\n acados_path = os.path.dirname(os.path.abspath(__file__))\n\n template_glob = acados_path + '/c_templates_tera/*'\n acados_template_path = acados_path + '/c_templates_tera'\n\n # call tera as system cmd\n os_cmd = \"{tera_path} '{template_glob}' '{in_file}' '{json_path}' '{out_file}'\".format(\n tera_path=tera_path,\n template_glob=template_glob,\n json_path=json_path,\n in_file=in_file,\n out_file=out_file\n )\n status = os.system(os_cmd)\n if (status != 0):\n raise Exception('Rendering of {} failed! Exiting.\\n'.format(in_file))\n\n os.chdir(cwd)\n\n\n## Conversion functions\ndef np_array_to_list(np_array):\n if isinstance(np_array, (np.ndarray)):\n return np_array.tolist()\n elif isinstance(np_array, (SX)):\n return DM(np_array).full()\n elif isinstance(np_array, (DM)):\n return np_array.full()\n else:\n raise(Exception(\n \"Cannot convert to list type {}\".format(type(np_array))\n ))\n\n\ndef format_class_dict(d):\n \"\"\"\n removes the __ artifact from class to dict conversion\n \"\"\"\n out = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = format_class_dict(v)\n\n out_key = k.split('__', 1)[-1]\n out[k.replace(k, out_key)] = v\n return out\n\n\ndef acados_class2dict(class_instance):\n \"\"\"\n removes the __ artifact from class to dict conversion\n \"\"\"\n\n d = dict(class_instance.__dict__)\n out = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = format_class_dict(v)\n\n out_key = k.split('__', 1)[-1]\n out[k.replace(k, out_key)] = v\n return out\n\n\ndef ocp_check_against_layout(ocp_nlp, ocp_dims):\n \"\"\"\n Check dimensions against layout\n Parameters\n ---------\n ocp_nlp : dict\n dictionary loaded from JSON to be post-processed.\n\n ocp_dims : instance of AcadosOcpDims\n \"\"\"\n\n # load JSON layout\n current_module = sys.modules[__name__]\n acados_path = os.path.dirname(current_module.__file__)\n with open(acados_path + '/acados_layout.json', 'r') as f:\n ocp_nlp_layout = json.load(f)\n\n ocp_check_against_layout_recursion(ocp_nlp, ocp_dims, ocp_nlp_layout)\n return\n\n\ndef ocp_check_against_layout_recursion(ocp_nlp, ocp_dims, layout):\n\n for key, item in ocp_nlp.items():\n\n try:\n layout_of_key = layout[key]\n except KeyError:\n raise Exception(\"ocp_check_against_layout_recursion: field\" \\\n \" '{0}' is not in layout but in OCP description.\".format(key))\n\n if isinstance(item, dict):\n ocp_check_against_layout_recursion(item, ocp_dims, layout_of_key)\n\n if 'ndarray' in layout_of_key:\n if isinstance(item, int) or isinstance(item, float):\n item = np.array([item])\n if isinstance(item, (list, np.ndarray)) and (layout_of_key[0] != 'str'):\n dim_layout = []\n dim_names = layout_of_key[1]\n\n for dim_name in dim_names:\n dim_layout.append(ocp_dims[dim_name])\n\n dims = 
tuple(dim_layout)\n\n item = np.array(item)\n item_dims = item.shape\n if len(item_dims) != len(dims):\n raise Exception('Mismatching dimensions for field {0}. ' \\\n 'Expected {1} dimensional array, got {2} dimensional array.' \\\n .format(key, len(dims), len(item_dims)))\n\n if np.prod(item_dims) != 0 or np.prod(dims) != 0:\n if dims != item_dims:\n raise Exception('acados -- mismatching dimensions for field {0}. ' \\\n 'Provided data has dimensions {1}, ' \\\n 'while associated dimensions {2} are {3}' \\\n .format(key, item_dims, dim_names, dims))\n return\n\n\ndef J_to_idx(J):\n nrows = J.shape[0]\n idx = np.zeros((nrows, ))\n for i in range(nrows):\n this_idx = np.nonzero(J[i,:])[0]\n if len(this_idx) != 1:\n raise Exception('Invalid J matrix structure detected, ' \\\n 'must contain one nonzero element per row. Exiting.')\n if this_idx.size > 0 and J[i,this_idx[0]] != 1:\n raise Exception('J matrices can only contain 1s. Exiting.')\n idx[i] = this_idx[0]\n return idx\n\n\ndef J_to_idx_slack(J):\n nrows = J.shape[0]\n ncol = J.shape[1]\n idx = np.zeros((ncol, ))\n i_idx = 0\n for i in range(nrows):\n this_idx = np.nonzero(J[i,:])[0]\n if len(this_idx) == 1:\n idx[i_idx] = i\n i_idx = i_idx + 1\n elif len(this_idx) > 1:\n raise Exception('J_to_idx_slack: Invalid J matrix. Exiting. ' \\\n 'Found more than one nonzero in row ' + str(i))\n if this_idx.size > 0 and J[i,this_idx[0]] != 1:\n raise Exception('J_to_idx_slack: J matrices can only contain 1s, ' \\\n 'got J(' + str(i) + ', ' + str(this_idx[0]) + ') = ' + str(J[i,this_idx[0]]) )\n if not i_idx == ncol:\n raise Exception('J_to_idx_slack: J must contain a 1 in every column!')\n return idx\n\n\ndef acados_dae_model_json_dump(model):\n\n # load model\n x = model.x\n xdot = model.xdot\n u = model.u\n z = model.z\n p = model.p\n\n f_impl = model.f_impl_expr\n model_name = model.name\n\n # create struct with impl_dae_fun, casadi_version\n fun_name = model_name + '_impl_dae_fun'\n impl_dae_fun = Function(fun_name, [x, xdot, u, z, p], [f_impl])\n\n casadi_version = CasadiMeta.version()\n str_impl_dae_fun = impl_dae_fun.serialize()\n\n dae_dict = {\"str_impl_dae_fun\": str_impl_dae_fun, \"casadi_version\": casadi_version}\n\n # dump\n json_file = model_name + '_acados_dae.json'\n with open(json_file, 'w') as f:\n json.dump(dae_dict, f, default=np_array_to_list, indent=4, sort_keys=True)\n print(\"dumped \", model_name, \" dae to file:\", json_file, \"\\n\")\n\n\ndef set_up_imported_gnsf_model(acados_formulation):\n\n gnsf = acados_formulation.gnsf_model\n\n # check CasADi version\n # dump_casadi_version = gnsf['casadi_version']\n # casadi_version = CasadiMeta.version()\n\n # if not casadi_version == dump_casadi_version:\n # print(\"WARNING: GNSF model was dumped with another CasADi version.\\n\"\n # + \"This might yield errors. 
Please use the same version for compatibility, serialize version: \"\n # + dump_casadi_version + \" current Python CasADi verison: \" + casadi_version)\n # input(\"Press any key to attempt to continue...\")\n\n # load model\n phi_fun = Function.deserialize(gnsf['phi_fun'])\n phi_fun_jac_y = Function.deserialize(gnsf['phi_fun_jac_y'])\n phi_jac_y_uhat = Function.deserialize(gnsf['phi_jac_y_uhat'])\n get_matrices_fun = Function.deserialize(gnsf['get_matrices_fun'])\n\n # obtain gnsf dimensions\n size_gnsf_A = get_matrices_fun.size_out(0)\n acados_formulation.dims.gnsf_nx1 = size_gnsf_A[1]\n acados_formulation.dims.gnsf_nz1 = size_gnsf_A[0] - size_gnsf_A[1]\n acados_formulation.dims.gnsf_nuhat = max(phi_fun.size_in(1))\n acados_formulation.dims.gnsf_ny = max(phi_fun.size_in(0))\n acados_formulation.dims.gnsf_nout = max(phi_fun.size_out(0))\n\n # save gnsf functions in model\n acados_formulation.model.phi_fun = phi_fun\n acados_formulation.model.phi_fun_jac_y = phi_fun_jac_y\n acados_formulation.model.phi_jac_y_uhat = phi_jac_y_uhat\n acados_formulation.model.get_matrices_fun = get_matrices_fun\n\n if \"f_lo_fun_jac_x1k1uz\" in gnsf:\n f_lo_fun_jac_x1k1uz = Function.deserialize(gnsf['f_lo_fun_jac_x1k1uz'])\n acados_formulation.model.f_lo_fun_jac_x1k1uz = f_lo_fun_jac_x1k1uz\n else:\n dummy_var_x1 = SX.sym('dummy_var_x1', acados_formulation.dims.gnsf_nx1)\n dummy_var_x1dot = SX.sym('dummy_var_x1dot', acados_formulation.dims.gnsf_nx1)\n dummy_var_z1 = SX.sym('dummy_var_z1', acados_formulation.dims.gnsf_nz1)\n dummy_var_u = SX.sym('dummy_var_z1', acados_formulation.dims.nu)\n dummy_var_p = SX.sym('dummy_var_z1', acados_formulation.dims.np)\n empty_var = SX.sym('empty_var', 0, 0)\n\n empty_fun = Function('empty_fun', \\\n [dummy_var_x1, dummy_var_x1dot, dummy_var_z1, dummy_var_u, dummy_var_p],\n [empty_var])\n acados_formulation.model.f_lo_fun_jac_x1k1uz = empty_fun\n\n del acados_formulation.gnsf_model\n" ]
[ [ "numpy.nonzero", "numpy.array", "numpy.zeros", "numpy.prod" ] ]
lawson-source/mtad-gat-pytorch
[ "9e671ea99dedd82ac55f53e53af1d1b56c13ebff" ]
[ "training.py" ]
[ "import os\r\nimport time\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\n\r\nclass Trainer:\r\n \"\"\"Trainer class for MTAD-GAT model.\r\n\r\n :param model: MTAD-GAT model\r\n :param optimizer: Optimizer used to minimize the loss function\r\n :param window_size: Length of the input sequence\r\n :param n_features: Number of input features\r\n :param target_dims: dimension of input features to forecast and reconstruct\r\n :param n_epochs: Number of iterations/epochs\r\n :param batch_size: Number of windows in a single batch\r\n :param init_lr: Initial learning rate of the module\r\n :param forecast_criterion: Loss to be used for forecasting.\r\n :param recon_criterion: Loss to be used for reconstruction.\r\n :param boolean use_cuda: To be run on GPU or not\r\n :param dload: Download directory where models are to be dumped\r\n :param log_dir: Directory where SummaryWriter logs are written to\r\n :param print_every: At what epoch interval to print losses\r\n :param log_tensorboard: Whether to log loss++ to tensorboard\r\n :param args_summary: Summary of args that will also be written to tensorboard if log_tensorboard\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n model,\r\n optimizer,\r\n window_size,\r\n n_features,\r\n target_dims=None,\r\n n_epochs=200,\r\n batch_size=256,\r\n init_lr=0.001,\r\n forecast_criterion=nn.MSELoss(),\r\n recon_criterion=nn.MSELoss(),\r\n use_cuda=True,\r\n dload=\"\",\r\n log_dir=\"output/\",\r\n print_every=1,\r\n log_tensorboard=True,\r\n args_summary=\"\",\r\n ):\r\n\r\n self.model = model\r\n self.optimizer = optimizer\r\n self.window_size = window_size\r\n self.n_features = n_features\r\n self.target_dims = target_dims\r\n self.n_epochs = n_epochs\r\n self.batch_size = batch_size\r\n self.init_lr = init_lr\r\n self.forecast_criterion = forecast_criterion\r\n self.recon_criterion = recon_criterion\r\n self.device = \"cuda\" if use_cuda and torch.cuda.is_available() else \"cpu\"\r\n self.dload = dload\r\n self.log_dir = log_dir\r\n self.print_every = print_every\r\n self.log_tensorboard = log_tensorboard\r\n\r\n self.losses = {\r\n \"train_total\": [],\r\n \"train_forecast\": [],\r\n \"train_recon\": [],\r\n \"val_total\": [],\r\n \"val_forecast\": [],\r\n \"val_recon\": [],\r\n }\r\n self.epoch_times = []\r\n\r\n if self.device == \"cuda\":\r\n self.model.cuda()\r\n\r\n if self.log_tensorboard:\r\n self.writer = SummaryWriter(f\"{log_dir}\")\r\n self.writer.add_text(\"args_summary\", args_summary)\r\n\r\n def fit(self, train_loader, val_loader=None):\r\n \"\"\"Train model for self.n_epochs.\r\n Train and validation (if validation loader given) losses stored in self.losses\r\n\r\n :param train_loader: train loader of input data\r\n :param val_loader: validation loader of input data\r\n \"\"\"\r\n\r\n init_train_loss = self.evaluate(train_loader)\r\n print(f\"Init total train loss: {init_train_loss[2]:5f}\")\r\n\r\n if val_loader is not None:\r\n init_val_loss = self.evaluate(val_loader)\r\n print(f\"Init total val loss: {init_val_loss[2]:.5f}\")\r\n\r\n print(f\"Training model for {self.n_epochs} epochs..\")\r\n train_start = time.time()\r\n for epoch in range(self.n_epochs):\r\n epoch_start = time.time()\r\n self.model.train()\r\n forecast_b_losses = []\r\n recon_b_losses = []\r\n\r\n for x, y in train_loader:\r\n x = x.to(self.device)\r\n y = y.to(self.device)\r\n self.optimizer.zero_grad()\r\n\r\n preds, recons = self.model(x)\r\n\r\n if self.target_dims is not None:\r\n x = x[:, 
:, self.target_dims]\r\n y = y[:, :, self.target_dims].squeeze(-1)\r\n\r\n if preds.ndim == 3:\r\n preds = preds.squeeze(1)\r\n if y.ndim == 3:\r\n y = y.squeeze(1)\r\n\r\n forecast_loss = torch.sqrt(self.forecast_criterion(y, preds))\r\n recon_loss = torch.sqrt(self.recon_criterion(x, recons))\r\n loss = forecast_loss + recon_loss\r\n\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n forecast_b_losses.append(forecast_loss.item())\r\n recon_b_losses.append(recon_loss.item())\r\n\r\n forecast_b_losses = np.array(forecast_b_losses)\r\n recon_b_losses = np.array(recon_b_losses)\r\n\r\n forecast_epoch_loss = np.sqrt((forecast_b_losses ** 2).mean())\r\n recon_epoch_loss = np.sqrt((recon_b_losses ** 2).mean())\r\n\r\n total_epoch_loss = forecast_epoch_loss + recon_epoch_loss\r\n\r\n self.losses[\"train_forecast\"].append(forecast_epoch_loss)\r\n self.losses[\"train_recon\"].append(recon_epoch_loss)\r\n self.losses[\"train_total\"].append(total_epoch_loss)\r\n\r\n # Evaluate on validation set\r\n forecast_val_loss, recon_val_loss, total_val_loss = \"NA\", \"NA\", \"NA\"\r\n if val_loader is not None:\r\n forecast_val_loss, recon_val_loss, total_val_loss = self.evaluate(val_loader)\r\n self.losses[\"val_forecast\"].append(forecast_val_loss)\r\n self.losses[\"val_recon\"].append(recon_val_loss)\r\n self.losses[\"val_total\"].append(total_val_loss)\r\n\r\n if total_val_loss <= self.losses[\"val_total\"][-1]:\r\n self.save(f\"model.pt\")\r\n\r\n if self.log_tensorboard:\r\n self.write_loss(epoch)\r\n\r\n epoch_time = time.time() - epoch_start\r\n self.epoch_times.append(epoch_time)\r\n\r\n if epoch % self.print_every == 0:\r\n s = (\r\n f\"[Epoch {epoch + 1}] \"\r\n f\"forecast_loss = {forecast_epoch_loss:.5f}, \"\r\n f\"recon_loss = {recon_epoch_loss:.5f}, \"\r\n f\"total_loss = {total_epoch_loss:.5f}\"\r\n )\r\n\r\n if val_loader is not None:\r\n s += (\r\n f\" ---- val_forecast_loss = {forecast_val_loss:.5f}, \"\r\n f\"val_recon_loss = {recon_val_loss:.5f}, \"\r\n f\"val_total_loss = {total_val_loss:.5f}\"\r\n )\r\n\r\n s += f\" [{epoch_time:.1f}s]\"\r\n print(s)\r\n\r\n if val_loader is None:\r\n self.save(f\"model.pt\")\r\n\r\n train_time = int(time.time() - train_start)\r\n if self.log_tensorboard:\r\n self.writer.add_text(\"total_train_time\", str(train_time))\r\n print(f\"-- Training done in {train_time}s.\")\r\n\r\n def evaluate(self, data_loader):\r\n \"\"\"Evaluate model\r\n\r\n :param data_loader: data loader of input data\r\n :return forecasting loss, reconstruction loss, total loss\r\n \"\"\"\r\n\r\n self.model.eval()\r\n\r\n forecast_losses = []\r\n recon_losses = []\r\n\r\n with torch.no_grad():\r\n for x, y in data_loader:\r\n x = x.to(self.device)\r\n y = y.to(self.device)\r\n\r\n preds, recons = self.model(x)\r\n\r\n if self.target_dims is not None:\r\n x = x[:, :, self.target_dims]\r\n y = y[:, :, self.target_dims].squeeze(-1)\r\n\r\n if preds.ndim == 3:\r\n preds = preds.squeeze(1)\r\n if y.ndim == 3:\r\n y = y.squeeze(1)\r\n\r\n forecast_loss = torch.sqrt(self.forecast_criterion(y, preds))\r\n recon_loss = torch.sqrt(self.recon_criterion(x, recons))\r\n\r\n forecast_losses.append(forecast_loss.item())\r\n recon_losses.append(recon_loss.item())\r\n\r\n forecast_losses = np.array(forecast_losses)\r\n recon_losses = np.array(recon_losses)\r\n\r\n forecast_loss = np.sqrt((forecast_losses ** 2).mean())\r\n recon_loss = np.sqrt((recon_losses ** 2).mean())\r\n\r\n total_loss = forecast_loss + recon_loss\r\n\r\n return forecast_loss, recon_loss, total_loss\r\n\r\n def save(self, 
file_name):\r\n \"\"\"\r\n Pickles the model parameters to be retrieved later\r\n :param file_name: the filename to be saved as,`dload` serves as the download directory\r\n \"\"\"\r\n PATH = self.dload + \"/\" + file_name\r\n if os.path.exists(self.dload):\r\n pass\r\n else:\r\n os.mkdir(self.dload)\r\n torch.save(self.model.state_dict(), PATH)\r\n\r\n def load(self, PATH):\r\n \"\"\"\r\n Loads the model's parameters from the path mentioned\r\n :param PATH: Should contain pickle file\r\n \"\"\"\r\n self.model.load_state_dict(torch.load(PATH, map_location=self.device))\r\n\r\n def write_loss(self, epoch):\r\n for key, value in self.losses.items():\r\n if len(value) != 0:\r\n self.writer.add_scalar(key, value[-1], epoch)\r\n" ]
[ [ "torch.load", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "numpy.array", "torch.nn.MSELoss" ] ]
alli1999/pytorch-lightning
[ "bf8c1fd76624fb6c3cb8ad0336244908b8c9cde1" ]
[ "pytorch_lightning/plugins/training_type/ddp_spawn.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nimport re\nfrom multiprocessing.queues import SimpleQueue\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.distributed\nimport torch.multiprocessing as mp\nfrom torch.nn.parallel.distributed import DistributedDataParallel\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.distributed.dist import LightningDistributed\nfrom pytorch_lightning.overrides import LightningDistributedModule\nfrom pytorch_lightning.overrides.distributed import prepare_for_backward\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\nfrom pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO\nfrom pytorch_lightning.plugins.training_type.parallel import ParallelPlugin\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom pytorch_lightning.utilities import (\n _TORCH_GREATER_EQUAL_1_7,\n _TORCH_GREATER_EQUAL_1_8,\n rank_zero_deprecation,\n rank_zero_warn,\n)\nfrom pytorch_lightning.utilities.cloud_io import atomic_save\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.distributed import (\n distributed_available,\n init_ddp_connection,\n rank_zero_only,\n ReduceOp,\n sync_ddp_if_available,\n)\nfrom pytorch_lightning.utilities.seed import reset_seed\n\nif _TORCH_GREATER_EQUAL_1_8:\n from pytorch_lightning.utilities.distributed import register_ddp_comm_hook\n\nlog = logging.getLogger(__name__)\n\n\nclass DDPSpawnPlugin(ParallelPlugin):\n \"\"\"\n Spawns processes using the :func:`torch.multiprocessing.spawn` method and joins processes after\n training finishes.\n \"\"\"\n\n distributed_backend = \"ddp_spawn\"\n\n def __init__(\n self,\n parallel_devices: Optional[List[torch.device]] = None,\n num_nodes: Optional[int] = None,\n cluster_environment: Optional[ClusterEnvironment] = None,\n checkpoint_io: Optional[CheckpointIO] = None,\n sync_batchnorm: Optional[bool] = None,\n ddp_comm_state: Optional[object] = None,\n ddp_comm_hook: Optional[callable] = None,\n ddp_comm_wrapper: Optional[callable] = None,\n **kwargs: Any,\n ):\n super().__init__(\n parallel_devices=parallel_devices,\n cluster_environment=cluster_environment,\n checkpoint_io=checkpoint_io,\n )\n if num_nodes is not None:\n rank_zero_deprecation(\n \"Argument `num_nodes` in `DDPSpawnPlugin` is deprecated in v1.4, and will be removed in v1.6. \"\n \"Notice that it will be overriden by the trainer setting.\"\n )\n self._num_nodes = num_nodes or 1\n if sync_batchnorm is not None:\n rank_zero_deprecation(\n \"Argument `sync_batchnorm` in `DDPSpawnPlugin` is deprecated in v1.4, and will be removed in v1.6. 
\"\n \"Notice that it will be overriden by the trainer setting.\"\n )\n self._sync_batchnorm = sync_batchnorm or False\n self._ddp_kwargs = kwargs\n self.dist = LightningDistributed()\n self.num_processes = len(parallel_devices) if parallel_devices is not None else 0\n self.mp_queue = None\n self._ddp_comm_state = ddp_comm_state\n self._ddp_comm_hook = ddp_comm_hook\n self._ddp_comm_wrapper = ddp_comm_wrapper\n self._local_rank = 0\n self.set_world_ranks()\n\n @property\n def num_nodes(self) -> int:\n return self._num_nodes\n\n @num_nodes.setter\n def num_nodes(self, num_nodes: int) -> None:\n # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks\n self._num_nodes = num_nodes\n self.set_world_ranks()\n\n @property\n def sync_batchnorm(self) -> bool:\n return self._sync_batchnorm\n\n @sync_batchnorm.setter\n def sync_batchnorm(self, sync_batchnorm: bool) -> None:\n self._sync_batchnorm = sync_batchnorm\n\n @property\n def local_rank(self) -> int:\n return self._local_rank\n\n def __getstate__(self):\n \"\"\"Makes this plugin pickleable without destroying the queue in the current process.\"\"\"\n state = self.__dict__.copy()\n state[\"mp_queue\"] = None\n return state\n\n def __setstate__(self, state):\n self.__dict__ = state\n\n @property\n def root_device(self):\n return self.parallel_devices[self.local_rank]\n\n @property\n def distributed_sampler_kwargs(self):\n distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)\n return distributed_sampler_kwargs\n\n @property\n def _is_single_process_single_device(self):\n return True\n\n def setup(self) -> None:\n os.environ[\"MASTER_PORT\"] = str(self.cluster_environment.master_port())\n # pass in a state q\n smp = mp.get_context(\"spawn\")\n self.mp_queue = smp.SimpleQueue()\n\n def set_world_ranks(self, process_idx: int = 0) -> None:\n self._local_rank = process_idx\n if self.cluster_environment is None:\n return\n self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)\n self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)\n rank_zero_only.rank = self.cluster_environment.global_rank()\n\n def get_mp_spawn_kwargs(self, trainer: \"pl.Trainer\") -> dict:\n return {\"args\": (trainer, self.mp_queue), \"nprocs\": self.num_processes}\n\n def start_training(self, trainer: \"pl.Trainer\") -> None:\n mp.spawn(self.new_process, **self.get_mp_spawn_kwargs(trainer))\n # reset optimizers, since main process is never used for training and thus does not have a valid optim state\n trainer.optimizers = []\n\n def start_evaluating(self, trainer: \"pl.Trainer\") -> None:\n mp.spawn(self.new_process, **self.get_mp_spawn_kwargs(trainer))\n\n def start_predicting(self, trainer: \"pl.Trainer\") -> None:\n mp.spawn(self.new_process, **self.get_mp_spawn_kwargs(trainer))\n\n def new_process(self, process_idx: int, trainer: \"pl.Trainer\", mp_queue: SimpleQueue) -> None:\n self.mp_queue = mp_queue\n\n reset_seed()\n\n self.set_world_ranks(process_idx)\n\n # set warning rank\n rank_zero_only.rank = self.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n init_ddp_connection(self.cluster_environment, self.torch_distributed_backend, self.global_rank, self.world_size)\n\n # TODO: we moved it to the trainer.fit after calling pre_dispatch\n # ... 
need to double check that it is the correct place\n # self.trainer.call_setup_hook(self.model)\n\n # set the ranks and devices\n self.dist.rank = self.global_rank\n self.dist.device = self.root_device\n\n # move the model to the correct device\n self.model_to_device()\n\n if self.sync_batchnorm:\n self.model = self.configure_sync_batchnorm(self.model)\n\n self.configure_ddp()\n\n self.barrier()\n\n results = trainer.run_stage()\n\n # persist info in ddp_spawn\n self.__transfer_distrib_spawn_state_on_fit_end(trainer, results)\n\n # ensure that spawned processes go through teardown before joining\n trainer._call_teardown_hook()\n\n def post_dispatch(self):\n # restore main state with best weights\n best_path = self.mp_queue.get()\n last_path = self.mp_queue.get()\n self._results = self.mp_queue.get()\n # get the `callback_metrics` and set it to the trainer\n # only in case the user does not override it.\n self.lightning_module.get_from_queue(self.mp_queue)\n\n # recover the weights of the processes trained in the children\n self.__recover_child_process_weights(best_path, last_path)\n\n def pre_configure_ddp(self):\n # if unset, default `find_unused_parameters` `True`\n # Many models require setting this parameter to True, as there are corner cases\n # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.\n # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.\n self._ddp_kwargs[\"find_unused_parameters\"] = self._ddp_kwargs.get(\"find_unused_parameters\", True)\n # todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization\n if (\n _TORCH_GREATER_EQUAL_1_7\n and not self.lightning_module.automatic_optimization\n and not self._ddp_kwargs.get(\"find_unused_parameters\", False)\n ):\n rank_zero_warn(\n \"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` \"\n \"to properly work with DDP.\"\n )\n self._ddp_kwargs[\"find_unused_parameters\"] = True\n\n def _register_ddp_hooks(self) -> None:\n # currently, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode\n # https://github.com/pytorch/pytorch/blob/v1.8.0/torch/nn/parallel/distributed.py#L1080-L1084\n if _TORCH_GREATER_EQUAL_1_8 and self.on_gpu and self._is_single_process_single_device:\n register_ddp_comm_hook(\n model=self._model,\n ddp_comm_state=self._ddp_comm_state,\n ddp_comm_hook=self._ddp_comm_hook,\n ddp_comm_wrapper=self._ddp_comm_wrapper,\n )\n\n def configure_ddp(self):\n self.pre_configure_ddp()\n self._model = DistributedDataParallel(\n LightningDistributedModule(self.model), device_ids=self.determine_ddp_device_ids(), **self._ddp_kwargs\n )\n self._register_ddp_hooks()\n\n def determine_ddp_device_ids(self):\n if self.root_device.type == \"cpu\":\n return None\n return [self.root_device.index]\n\n def __transfer_distrib_spawn_state_on_fit_end(self, trainer: \"pl.Trainer\", results: Any) -> None:\n checkpoint_callback = trainer.checkpoint_callback\n best_model_path = checkpoint_callback.best_model_path if checkpoint_callback else None\n\n # requires to compute the state_dict on all processes in case Metrics are present\n state_dict = self.lightning_module.state_dict()\n\n if self.global_rank == 0 and self.mp_queue is not None:\n rank_zero_warn(\"cleaning up ddp environment...\")\n\n # save the last weights\n last_path = None\n if trainer.state.fn == TrainerFn.FITTING and best_model_path is not 
None and len(best_model_path) > 0:\n last_path = re.sub(\".ckpt\", \".tmp_end.ckpt\", best_model_path)\n atomic_save(state_dict, last_path)\n\n # todo, pass complete checkpoint as state dictionary\n self.mp_queue.put(best_model_path)\n self.mp_queue.put(last_path)\n self.mp_queue.put(results)\n self.lightning_module.add_to_queue(self.mp_queue) # adds the `callback_metrics` to the queue\n\n def __recover_child_process_weights(self, best_path, last_path):\n # transfer back the best path to the trainer\n if self.lightning_module.trainer.checkpoint_callback:\n self.lightning_module.trainer.checkpoint_callback.best_model_path = best_path\n # todo, pass also best score\n\n # load last weights\n if last_path is not None and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:\n ckpt = pl_load(last_path, map_location=lambda storage, loc: storage)\n self.lightning_module.load_state_dict(ckpt)\n\n def barrier(self, *args, **kwargs) -> None:\n if not distributed_available():\n return\n if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == \"nccl\":\n torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())\n else:\n torch.distributed.barrier()\n\n def broadcast(self, obj: object, src: int = 0) -> object:\n if not distributed_available():\n return obj\n return self.dist.broadcast(obj)\n\n def model_to_device(self):\n if self.root_device.type == \"cuda\":\n # set the device on the spawned subprocesses\n torch.cuda.set_device(self.root_device)\n self.model.to(self.root_device)\n\n def pre_backward(self, closure_loss: torch.Tensor) -> None:\n \"\"\"Run before precision plugin executes backward\"\"\"\n if not self.lightning_module.automatic_optimization:\n prepare_for_backward(self.model, closure_loss)\n\n def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = \"mean\") -> torch.Tensor:\n \"\"\"\n Reduces a tensor from several distributed processes to one aggregated tensor.\n\n Args:\n tensor: the tensor to sync and reduce\n group: the process group to gather results from. Defaults to all processes (world)\n reduce_op: the reduction operation. Defaults to 'mean'/'avg'.\n Can also be a string 'sum' to calculate the sum during reduction.\n\n Return:\n reduced value, except when the input was not a tensor the output remains is unchanged\n \"\"\"\n if isinstance(tensor, torch.Tensor):\n tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)\n return tensor\n\n def training_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def validation_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def test_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def predict_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def post_training_step(self):\n if not self.lightning_module.automatic_optimization:\n self.model.require_backward_grad_sync = True\n\n @classmethod\n def register_plugins(cls, plugin_registry: Dict) -> None:\n plugin_registry.register(\n \"ddp_spawn_find_unused_parameters_false\",\n cls,\n description=\"DDPSpawn Plugin with `find_unused_parameters` as False\",\n find_unused_parameters=False,\n )\n" ]
[ [ "torch.multiprocessing.get_context", "torch.cuda.set_device", "torch.distributed.get_backend", "torch.distributed.barrier" ] ]
Pressio/pressio4py
[ "36676dbd112a7c7960ccbf302ff14d4376c819ec" ]
[ "tests_tut_old_to_revise_and_trash/tutorials/tut_linear_decoder/main.py" ]
[ "\nimport numpy as np\nfrom pressio4py import rom as rom\n\ndef rank1StateDecoder():\n # create the matrix\n # attention: we declare phi to be column-major for these reasons:\n #\n # 1. pressio4py uses blas (wherever possible) to operate on numpy arrays,\n # so a column-major layout implies seamless compatiblity with blas\n #\n # 2. when using column-major layout, pressio4py references the\n # matrix phi without doing a deep copy, which saves memory\n # since a single jacobian matrix is alive.\n #\n phi = np.ones((10,3), order='F')\n\n # to create the linear decoder, one can simply do\n linearDecoder = rom.Decoder(phi)\n\n # linearDecoder exposes a method to evaluate the mapping\n fomState, romState = np.zeros(10), np.ones(3)\n linearDecoder.applyMapping(romState, fomState)\n print(fomState)\n\ndef rank2StateDecoder():\n # create the phi tensor\n # attention: we declare phi to be column-major for these reasons:\n #\n # 1. pressio4py uses blas (wherever possible) to operate on numpy arrays,\n # so a column-major layout implies seamless compatiblity with blas\n #\n # 2. when using column-major layout, pressio4py references the\n # matrix phi without doing a deep copy, which saves memory\n # since a single jacobian matrix is alive.\n #\n # suppose that:\n # N = 10 is total FOM deg of freedom\n # numFields = 4 (e.g. density, x-vel, y-vel, temperature)\n # and romSize = 3\n #\n # each slice phi[:,:,k] basically corresponds to the POD modes for the k-th field\n #\n N = 10\n romSize = 3\n numFields = 4\n phi = np.ones((N, romSize, numFields), order='F')\n\n # to create the linear decoder, one can simply do\n linearDecoder = rom.rank2state.MultiFieldDecoder(phi)\n\n # linearDecoder exposes a method to evaluate the mapping\n fomState = np.zeros((N, numFields), order='F')\n romState = np.ones((romSize, numFields), order='F')\n linearDecoder.applyMapping(romState, fomState)\n print(fomState)\n\nif __name__ == \"__main__\":\n rank1StateDecoder()\n rank2StateDecoder()\n" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
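The pressio4py decoder in the tutorial above is linear, so the mapping it evaluates can be reproduced with plain NumPy for readers who do not have pressio4py installed. The sketch below mirrors the rank-1 and rank-2 cases under the assumption, consistent with the tutorial's own comments, that the mapping is a matrix product of the basis phi with the ROM state; the helper names are illustrative and not part of the pressio4py API.

import numpy as np

def apply_rank1_mapping(phi, rom_state):
    # Rank-1 case: fomState = phi @ romState, with phi of shape (N, romSize).
    return phi @ rom_state

def apply_rank2_mapping(phi, rom_state):
    # Rank-2 (multi-field) case: each slice phi[:, :, k] holds the modes of
    # field k, so the mapping is applied field by field.
    N, rom_size, num_fields = phi.shape
    fom_state = np.zeros((N, num_fields), order='F')
    for k in range(num_fields):
        fom_state[:, k] = phi[:, :, k] @ rom_state[:, k]
    return fom_state

# Same shapes as in the tutorial: phi filled with ones, romState of ones.
phi1 = np.ones((10, 3), order='F')
print(apply_rank1_mapping(phi1, np.ones(3)))                   # every entry is 3.0

phi2 = np.ones((10, 3, 4), order='F')
print(apply_rank2_mapping(phi2, np.ones((3, 4), order='F')))   # every entry is 3.0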
dustinsnoap/Legends_of_Alabastra
[ "e6880b8f2901bc2769032a93bde9f0016809ee5e" ]
[ "pictobit.py" ]
[ "import cv2, math, string, numpy\r\nimage = cv2.imread('test_resize.png', -1)\r\nimage = numpy.array(image).tolist()\r\ndigits = string.digits + string.ascii_letters\r\n\r\n#generic helper functions\r\ndef dictToArr(dictionary):\r\n arr = [None]*len(dictionary)\r\n for d in dictionary:\r\n arr[dictionary[d]-1] = d\r\n return arr\r\n\r\ndef pixelsPerChar(base):\r\n if base == 2: return 8\r\n if base < 13: return 4\r\n if base < 180: return 2\r\n return 1\r\n\r\ndef getHex(color):\r\n r = str(hex(color[2]).split('x')[-1])\r\n if len(r) == 1: r = '0'+r\r\n g = str(hex(color[1]).split('x')[-1])\r\n if len(g) == 1: g = '0'+g\r\n b = str(hex(color[0]).split('x')[-1])\r\n if len(b) == 1: b = '0'+b\r\n return r+g+b \r\n\r\ndef isTransparent(color):\r\n if len(color) == 3: return False\r\n if len(color) == 4 and color[3] == 255: return False\r\n return True\r\n\r\ndef fillImage(image, tile_size=8):\r\n rows = len(image)\r\n cols = len(image[0])\r\n rows_needed = rows//tile_size\r\n cols_needed = cols//tile_size\r\n #fill columns\r\n for row in image:\r\n for col in range(cols_needed): row.append([0,0,0,0])\r\n #fill rows\r\n empty_row = []\r\n for col in range(cols+cols_needed): empty_row.append([0,0,0,0])\r\n for row in range(rows_needed): image.append(empty_row)\r\n\r\ndef convertImageToHex(image):\r\n hex_image = list()\r\n for row in image:\r\n hex_row = list()\r\n for col in row:\r\n if isTransparent(col): hex_row.append(0)\r\n else: hex_row.append(getHex(col))\r\n hex_image.append(hex_row)\r\n return hex_image\r\n\r\n#worker helper functions\r\ndef createColorDict(image):\r\n color_dict = dict()\r\n counter = 1\r\n for row in image:\r\n for col in row:\r\n if col != 0 and col not in color_dict:\r\n color_dict[col] = counter\r\n counter += 1\r\n return color_dict\r\n\r\ndef createTileRow(image_row, colors, tile_size):\r\n tiles = []\r\n for t in range(len(image_row[0])//tile_size): tiles.append([])\r\n for i, row in enumerate(image_row):\r\n tile_row = list()\r\n for col_num, col in enumerate(row):\r\n color_index = 0 if col == 0 else colors[col]\r\n tile_row.append(color_index)\r\n if len(tile_row) == tile_size:\r\n index = col_num // tile_size\r\n tiles[index].append(tile_row)\r\n tile_row = []\r\n return tiles\r\n\r\ndef compressTile(tile, base):\r\n pixels_per_char = pixelsPerChar(base)\r\n compressed_tile = ''\r\n for row in tile:\r\n for i in range(0, 8, pixels_per_char):\r\n group = row[i:i+pixels_per_char]\r\n string = ''\r\n for el in group: string += str(el)\r\n compressed_tile += chr(int(string, base)+42)\r\n return compressed_tile\r\n\r\ndef compressTiles(tiled_image, colors):\r\n base = len(colors)+1\r\n tiles = list()\r\n for row in tiled_image:\r\n tile_row = list()\r\n for tile in row:\r\n compressed_tile = compressTile(tile, base)\r\n tile_row.append(compressed_tile)\r\n tiles.append(tile_row)\r\n return tiles\r\n\r\ndef convertToTiles(image, colors, tile_size):\r\n converted = []\r\n for row_num in range(0, len(image), tile_size):\r\n tile = createTileRow(image[row_num:row_num+tile_size], colors, tile_size)\r\n converted.append(tile)\r\n return converted\r\n\r\n#worker functions\r\ndef createTileset(compressed_tiles, tile_size, compressed_factor):\r\n tiledict = dict()\r\n counter = 1\r\n for row in compressed_tiles:\r\n for tile in row:\r\n if tile == '*'*(tile_size**2//4): continue\r\n if tile not in tiledict:\r\n tiledict[tile] = counter\r\n counter += 1\r\n return tiledict\r\n\r\ndef createBitmap(tiled_image, tiledict, base):\r\n bitmap = list()\r\n for tile_row in 
tiled_image:\r\n bit_row = list()\r\n for tile in tile_row:\r\n tile = compressTile(tile, base)\r\n if tile in tiledict:\r\n bit_row.append(tiledict[tile])\r\n else:\r\n bit_row.append(0)\r\n bitmap.append(bit_row)\r\n return bitmap\r\n\r\ndef convert(image, tile_size=8):\r\n fillImage(image)\r\n image = convertImageToHex(image)\r\n colors = createColorDict(image)\r\n compressed_factor = pixelsPerChar(len(colors)+1)\r\n tiled_image = convertToTiles(image, colors, tile_size)\r\n compressed_tiles = compressTiles(tiled_image, colors)\r\n tiledict = createTileset(compressed_tiles, tile_size, compressed_factor)\r\n tileset = dictToArr(tiledict)\r\n bitmap = createBitmap(tiled_image, tiledict, compressed_factor)\r\n image = {\r\n 'bitmap': bitmap,\r\n 'tileset': tileset,\r\n 'compressed_factor': compressed_factor,\r\n 'colors': colors\r\n }\r\n return image\r\n\r\n\r\n#run function\r\nnewimage = convert(image, 8)\r\nfor m in newimage: print(m, newimage[m])" ]
[ [ "numpy.array" ] ]
PacktPublishing/Extending-Power-BI-with-Python-and-R
[ "b20edc564960c9bafdb1b05212aad133e8253dae" ]
[ "Chapter03/01-create-pbi-service-py-packages-env-yaml-file.py" ]
[ "\nimport os\nimport requests\nimport re\nimport pandas as pd\nimport yaml\n\nfrom bs4 import BeautifulSoup\n\n\nURL = 'https://docs.microsoft.com/en-us/power-bi/connect-data/service-python-packages-support'\npage = requests.get(URL) # performs an HTTP request to the given URL\n\nsoup = BeautifulSoup(page.content, 'html.parser') # get a parsed HTML object\n\nmain_soup = soup.find(id='main') # find the tag having id='main'\n\n#-------------------------------------------------------------------\n# Get the actual Python version for Power BI Service Python Visuals\n#-------------------------------------------------------------------\n\n# Find the <li> tag that contains 'Python runtime' into its text, then strip it\npython_ver_full_str = main_soup.find('li', text=re.compile('Python runtime')).text.strip()\n\n# Extract the version string using regex \nm = re.search('(?<=Python\\s)\\d{1,3}\\.\\d{1,3}(\\.\\d{1,3})?', python_ver_full_str)\n\npython_ver_str = m.group(0) # get the extracted text from the default group (0)\n\n\n#---------------------------------------------------------------------\n# Grab the table containing all the python packages and their version\n#---------------------------------------------------------------------\n\n# The goal is to securely target a table cell (the pandas package is sure to be there).\n# From that cell, you can then go back to the \"parent\" objects, until you have referenced the entire table.\n\n# Find all the <td> tags that contains 'pandas' in their text, then get the first one of them\npandas_cell_soup = main_soup.findAll('td', text=re.compile('pandas'))[0]\n\n# Now start from that cell and go back to the parent table\npackages_table_soup = pandas_cell_soup.parent.parent.parent\n\n# Get all the row-elements of the table, including the headers (the body)\npackages_body_soup = packages_table_soup.find_all('tr')\n\n# Extract the first row-element from the body: it's the header\npackages_header_soup = packages_body_soup[0]\n\n# Extract all the row-elements from the body except the first one: we have the table rows\npackages_rows_soup = packages_body_soup[1:]\n\n# Let's parse the header in order to collect the table's column names in a list\ncolumn_names = []\nfor item in packages_header_soup.find_all('th'):# loop through all th elements\n item = (item.text).rstrip('\\n') # convert the th elements to text and strip \"\\n\"\n column_names.append(item) # append the clean column name to column_names\n \n# Let's parse all the row-elements in order to collect the table's rows in a list\npackages_rows = []\nfor row_idx in range(len(packages_rows_soup)): # loop all the row-elements using their index\n row = [] # this list will hold data cells for one row\n for row_item_soup in packages_rows_soup[row_idx].find_all('td'): # loop through all data cells of a fixed row\n \n # Remove \\xa0 (non-breaking spaces), \\n (new lines), \\\\s any whitespace char into the data cell\n # (packages don't have spaces in their name) and comma (thousands separator) from row_item.text\n # (the stripped data cell) using regex\n cell_text = re.sub('(\\xa0)|(\\n)|,|\\\\s', '', row_item_soup.text)\n row.append(cell_text) # append cell_text to row list\n \n packages_rows.append(row) # append one row to packages_rows\n\n# Use the parsed lists of rows and column names to create a pandas dataframe\ndf = pd.DataFrame(data=packages_rows,columns=column_names)\n\n\n#----------------------------------------------------\n# Write an environment YAML file using the 
dataframe\n#----------------------------------------------------\n\n# Remove the unused Link column\npackages_df = df.drop('Link', axis=1)\n\npackages_version_lst = packages_df.agg('=='.join, axis=1).values.tolist()\n\n# Now let's write a YAML file using the scraped info about packages\npackages_dict = {\n 'name': 'pbi_visuals_env',\n\n 'dependencies': [\n 'python==%s' % python_ver_str,\n 'pip',\n {'pip': packages_version_lst}\n ]\n \n}\n\nprint( yaml.dump(packages_dict, default_flow_style=False) )\n\ndestination_path = r'.'\n\n## In case you want to create a subfolder\n#os.makedirs(destination_path, exist_ok=True)\n\nyaml_file_name = 'visuals_environment.yaml'\n\nwith open(os.path.join(destination_path, yaml_file_name), 'w') as file:\n documents = yaml.dump(packages_dict, file)\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
lupera13k/tarea-proyecto
[ "20ea9407207fef5b1e0e50505e9523c767cd306f" ]
[ "distExpo.py" ]
[ "#importing required modules\r\n#%matplotlib inline\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy import stats\r\nimport seaborn as sns\r\n\r\nnp.random.seed(2016) #reproducible random numbers\r\nsns.set_palette(\"deep\", desat=.6)\r\n#seaborn aesthetic parameters\r\nsns.set_context(rc={\"figure.figsize\":(8, 4)})\r\n\r\n# Plotting the Exponential distribution\r\nexponencial = stats.expon()\r\nx = np.linspace(exponencial.ppf(0.01),\r\n                exponencial.ppf(0.99), 100)\r\nfp = exponencial.pdf(x) # Probability density function\r\nplt.plot(x, fp)\r\nplt.title('Exponential Distribution')\r\nplt.ylabel('probability')\r\nplt.xlabel('values')\r\nplt.show()\r\n" ]
[ [ "scipy.stats.expon", "numpy.random.seed" ] ]
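As a quick cross-check of the curve plotted in the script above, the scipy.stats.expon pdf can be compared against a histogram of samples drawn with NumPy. This is an illustrative sketch, not part of the original script; scale=1.0 matches the default parameters of stats.expon().

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

rng = np.random.default_rng(2016)
samples = rng.exponential(scale=1.0, size=10_000)

x = np.linspace(0, 6, 200)
plt.hist(samples, bins=60, density=True, alpha=0.4, label='sampled')
plt.plot(x, stats.expon.pdf(x), 'r', label='stats.expon pdf')
plt.xlabel('values')
plt.ylabel('probability density')
plt.legend()
plt.show()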
jaedong2019/mec647
[ "cba7adc76153bc6f2ca6483839e75d3ac4b635d5" ]
[ "test/test_vi.py" ]
[ "import numpy as np\n\nimport dolfinx\nimport dolfinx.plot\nimport dolfinx.io\nfrom dolfinx.fem import (\n Constant,\n Function,\n FunctionSpace,\n assemble_scalar,\n dirichletbc,\n form,\n locate_dofs_geometrical,\n set_bc,\n)\nimport dolfinx.mesh\nfrom dolfinx.mesh import CellType\nimport ufl\n\nfrom mpi4py import MPI\nimport petsc4py\nfrom petsc4py import PETSc\nimport sys\nimport yaml\n\nsys.path.append(\"../\")\nfrom solvers import SNESSolver\n\npetsc4py.init(sys.argv)\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nwith open(\"parameters.yml\") as f:\n parameters = yaml.load(f, Loader=yaml.FullLoader)\n\nLx = parameters.get(\"geometry\").get(\"Lx\")\nLy = parameters.get(\"geometry\").get(\"Ly\")\nell = parameters.get(\"model\").get(\"ell\")\n\n\n\nmesh = dolfinx.mesh.create_rectangle(MPI.COMM_WORLD, [[0.0, 0.0], [Lx, Ly]],\n [100, 10],\n cell_type=CellType.triangle)\nV = FunctionSpace(mesh, (\"CG\", 1))\n\nzero = Function(V)\nwith zero.vector.localForm() as loc:\n loc.set(0.0)\n\none = Function(V)\nwith one.vector.localForm() as loc:\n loc.set(1.0)\n\n\ndef left(x):\n is_close = np.isclose(x[0], 0.0)\n return is_close\n\n\ndef right(x):\n is_close = np.isclose(x[0], Lx)\n return is_close\n\n\nleft_facets = dolfinx.mesh.locate_entities_boundary(mesh,\n mesh.topology.dim - 1,\n left)\nleft_dofs = dolfinx.fem.locate_dofs_topological(V, mesh.topology.dim - 1,\n left_facets)\n\nright_facets = dolfinx.mesh.locate_entities_boundary(mesh,\n mesh.topology.dim - 1,\n left)\nright_dofs = dolfinx.fem.locate_dofs_topological(V, mesh.topology.dim - 1,\n right_facets)\n\nbcs = [dirichletbc(zero, left_dofs), dirichletbc(one, right_dofs)]\n\nu = Function(V)\nenergy = (ell * ufl.inner(ufl.grad(u), ufl.grad(u)) + u / ell) * ufl.dx\ndenergy = ufl.derivative(energy, u, ufl.TestFunction(V))\nddenergy = ufl.derivative(denergy, u, ufl.TrialFunction(V))\n\nproblem = SNESSolver(\n denergy,\n u,\n bcs,\n bounds=(zero, one),\n petsc_options=parameters.get(\"solvers\").get(\"damage\").get(\"snes\"),\n prefix=\"vi\",\n)\n\nsolver_snes = problem.solver\nsolver_snes.setType(\"vinewtonrsls\")\n\nsolver_snes.setTolerances(rtol=1.0e-8, max_it=250)\nsolver_snes.getKSP().setType(\"preonly\")\nsolver_snes.getKSP().setTolerances(rtol=1.0e-8)\nsolver_snes.getKSP().getPC().setType(\"lu\")\n\n\ndef monitor(snes, its, fgnorm):\n print(f\"Iteration {its:d}, error: {fgnorm:2.3e}\")\n\n\nsolver_snes.setMonitor(monitor)\nsolver_snes.solve(None, u.vector)\n# solver_snes.view()\n\nfrom pathlib import Path\nPath(\"output\").mkdir(parents=True, exist_ok=True)\n\nwith dolfinx.io.XDMFFile(MPI.COMM_WORLD, \"output/u.xdmf\", \"w\") as f:\n f.write_mesh(mesh)\n f.write_function(u)\n\nimport pyvista\nfrom pyvista.utilities import xvfb\n\nimport dolfinx.plot\n\nsys.path.append(\"../../test\")\nfrom test_viz import plot_vector, plot_scalar, plot_profile\n\nxvfb.start_xvfb(wait=0.05)\npyvista.OFF_SCREEN = True\n\nplotter = pyvista.Plotter(\n title=\"Test VI\",\n window_size=[800, 600],\n shape=(1, 1),\n)\n_props = {\"show_edges\":True, \"show_scalar_bar\": True, \"clim\":[0, 1]}\n_plt = plot_scalar(u, plotter, subplot=(0, 0), lineproperties=_props)\n\n# _plt = plot_vector(u, plotter, subplot=(0, 1))\n\n_plt.screenshot(f\"./output/test_vi_MPI{MPI.COMM_WORLD.size}.png\")\n\nif not pyvista.OFF_SCREEN:\n plotter.show()\n\ntol = 1e-3\nxs = np.linspace(0 + tol, Lx - tol, 101)\npoints = np.zeros((3, 101))\npoints[0] = xs\n\n_plt, data = plot_profile(\n u,\n points,\n plotter,\n subplot=(0, 0),\n lineproperties={\n \"c\": \"k\",\n 
\"label\": f\"$u_\\ell$ with $\\ell$ = {ell:.2f}\"\n },\n)\nax = _plt.gca()\nax.axvline(0.0, c=\"k\")\nax.axvline(2 * ell, c=\"k\", label='D=$2\\ell$')\n_plt.legend()\n_plt.fill_between(data[0], data[1].reshape(len(data[1])))\n_plt.title(\"Variational Inequality\")\n_plt.savefig(f\"./output/test_vi_profile_MPI{MPI.COMM_WORLD.size}-{ell:.3f}.png\")\n\n\nimport pdb\n\npdb.set_trace()\nfrom dolfinx.fem.assemble import assemble_scalar\n\nmin_en = assemble_scalar(dolfinx.fem.form(energy))" ]
[ [ "numpy.zeros", "numpy.linspace", "numpy.isclose" ] ]
appleface2050/Coursera-ML
[ "e588fa5776a79d6516b2135124898a2db9da82ae", "e588fa5776a79d6516b2135124898a2db9da82ae" ]
[ "johnwittenauer/src/simple_linear_regression.py", "mr_code/linear_svm.py" ]
[ "# coding=utf-8\n\"\"\"\nhttp://www.johnwittenauer.net/machine-learning-exercises-in-python-part-1/\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom util.lib import computeCost, gradient_descent\n\n\npath = os.getcwd() + \"\\data\\ex1data1.txt\"\ndata = pd.read_csv(path, header=None, names=['Population', 'Profit'])\nprint(data.head())\nprint(data.describe())\n\ndata.plot(kind='scatter', x=\"Population\", y=\"Profit\", figsize=(12, 8))\n\n\n# plt.show()\n\n\n\n# append a ones column to the front of the data set\ndata.insert(0, 'Ones', 1)\n\nprint(data.head())\n\n# set X (training data) and y (target variable)\ncols = data.shape[1]\nX = data.iloc[:, 0:cols - 1]\ny = data.iloc[:, cols - 1:cols]\n\n# convert from data frames to numpy matrices\nX = np.matrix(X.values)\ny = np.matrix(y.values)\n# theta = np.matrix(np.array([0,0]))\ntheta = np.matrix(np.zeros(X.shape[1]))\ntheta = theta.T\nprint(X.shape, theta.shape, y.shape)\n\nerror = computeCost(X, y, theta)\nprint(\"error:\", error)\n\niters = 20000\n\ng, cost, final_cost = gradient_descent(X, y, theta, 0.01, iters)\nprint(g)\nprint(final_cost)\n\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.plot(X[:,1], (g[0, 0] + (g[1, 0] * X[:,1])), 'r', label='Prediction')\n\nax.scatter(data.Population, data.Profit, label='Traning Data')\nax.legend(loc=2)\nax.set_xlabel('Population')\nax.set_ylabel('Profit')\nax.set_title('Predicted Profit vs. Population Size')\n\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.plot(np.arange(iters), cost, 'r')\nax.set_xlabel('Iterations')\nax.set_ylabel('Cost')\nax.set_title('Error vs. Training Epoch')\nplt.show()\n\n\n", "# coding:utf-8\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.svm\nimport seaborn as sns\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\n\nmat = sio.loadmat('data/ex6data1.mat')\n\nprint(mat.keys())\ndata = pd.DataFrame(mat.get('X'), columns=['X1', 'X2'])\ndata['y'] = mat.get(\"y\")\nprint(data.head())\n# print(data.describe())\n\n\nfig, ax = plt.subplots(figsize=(8,6))\n# ax.scatter(data['X1'], data['X2'], s=50, c=data['y'], cmap='Reds')\nax.scatter(data['X1'], data['X2'], s=55, c=data['y'])\nax.set_title('Raw data')\nax.set_xlabel('X1')\nax.set_ylabel('X2')\n\n# plt.show()\n\nsvc1 = sklearn.svm.LinearSVC(C=1, loss='hinge')\nsvc1.fit(data[['X1', 'X2']], data['y'])\nprint (svc1.score(data[['X1', 'X2']], data['y']))\n\ndata['SVM1 Confidence'] = svc1.decision_function(data[['X1', 'X2']])\n\nfig, ax = plt.subplots(figsize=(8,6))\nax.scatter(data['X1'], data['X2'], s=50, c=data['SVM1 Confidence'], cmap='RdBu')\nax.set_title('SVM (C=1) Decision Confidence')\n\n\nsvc100 = sklearn.svm.LinearSVC(C=90, loss='hinge')\nsvc100.fit(data[['X1', 'X2']], data['y'])\nsvc100.score(data[['X1', 'X2']], data['y'])\ndata['SVM100 Confidence'] = svc100.decision_function(data[['X1', 'X2']])\n\nprint(data)\n\nfig, ax = plt.subplots(figsize=(8,6))\nax.scatter(data['X1'], data['X2'], s=50, c=data['SVM100 Confidence'], cmap='RdBu')\nax.set_title('SVM (C=100) Decision Confidence')\n\nplt.show()\n\n\n\n\n" ]
[ [ "numpy.matrix", "pandas.read_csv", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.zeros" ], [ "matplotlib.pyplot.show", "scipy.io.loadmat", "matplotlib.pyplot.subplots" ] ]
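For the single-feature linear regression that the first script above solves with gradient descent, the closed-form normal equation theta = (X^T X)^{-1} X^T y gives the same fit and is a convenient cross-check. The sketch below uses synthetic data, since ex1data1.txt and the util.lib helpers are not included in this record.

import numpy as np

rng = np.random.default_rng(0)
population = rng.uniform(5, 25, size=97)
profit = -3.9 + 1.2 * population + rng.normal(0, 2, size=97)

# Design matrix with a leading column of ones, as in the script above.
X = np.column_stack([np.ones_like(population), population])
y = profit

# Normal equation (np.linalg.lstsq is the numerically stable equivalent).
theta_ne = np.linalg.solve(X.T @ X, X.T @ y)
theta_ls, *_ = np.linalg.lstsq(X, y, rcond=None)
print(theta_ne, theta_ls)   # both close to [-3.9, 1.2]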
docking-org/rdk
[ "373a89021e478f878c6011a201e3fb8f4a122093" ]
[ "rdkit/ML/AnalyzeComposite.py" ]
[ "# $Id$\n#\n# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\n\"\"\" command line utility to report on the contributions of descriptors to\ntree-based composite models\n\nUsage: AnalyzeComposite [optional args] <models>\n\n <models>: file name(s) of pickled composite model(s)\n (this is the name of the db table if using a database)\n\n Optional Arguments:\n\n -n number: the number of levels of each model to consider\n\n -d dbname: the database from which to read the models\n\n -N Note: the note string to search for to pull models from the database\n\n -v: be verbose whilst screening\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nimport numpy\n\nfrom rdkit.Dbase.DbConnection import DbConnect\nfrom rdkit.ML import ScreenComposite\nfrom rdkit.ML.Data import Stats\nfrom rdkit.ML.DecTree import TreeUtils, Tree\nfrom rdkit.six.moves import cPickle\n\n\n__VERSION_STRING = \"2.2.0\"\n\n\ndef ProcessIt(composites, nToConsider=3, verbose=0):\n composite = composites[0]\n nComposites = len(composites)\n ns = composite.GetDescriptorNames()\n # nDesc = len(ns)-2\n if len(ns) > 2:\n globalRes = {}\n\n nDone = 1\n descNames = {}\n for composite in composites:\n if verbose > 0:\n print('#------------------------------------')\n print('Doing: ', nDone)\n nModels = len(composite)\n nDone += 1\n res = {}\n for i in range(len(composite)):\n model = composite.GetModel(i)\n if isinstance(model, Tree.TreeNode):\n levels = TreeUtils.CollectLabelLevels(model, {}, 0, nToConsider)\n TreeUtils.CollectDescriptorNames(model, descNames, 0, nToConsider)\n for descId in levels.keys():\n v = res.get(descId, numpy.zeros(nToConsider, numpy.float))\n v[levels[descId]] += 1. 
/ nModels\n res[descId] = v\n for k in res:\n v = globalRes.get(k, numpy.zeros(nToConsider, numpy.float))\n v += res[k] / nComposites\n globalRes[k] = v\n if verbose > 0:\n for k in res.keys():\n name = descNames[k]\n strRes = ', '.join(['%4.2f' % x for x in res[k]])\n print('%s,%s,%5.4f' % (name, strRes, sum(res[k])))\n\n print()\n\n if verbose >= 0:\n print('# Average Descriptor Positions')\n retVal = []\n for k in globalRes:\n name = descNames[k]\n if verbose >= 0:\n strRes = ', '.join(['%4.2f' % x for x in globalRes[k]])\n print('%s,%s,%5.4f' % (name, strRes, sum(globalRes[k])))\n tmp = [name]\n tmp.extend(globalRes[k])\n tmp.append(sum(globalRes[k]))\n retVal.append(tmp)\n if verbose >= 0:\n print()\n else:\n retVal = []\n return retVal\n\n\ndef ErrorStats(conn, where, enrich=1):\n fields = ('overall_error,holdout_error,overall_result_matrix,' +\n 'holdout_result_matrix,overall_correct_conf,overall_incorrect_conf,' +\n 'holdout_correct_conf,holdout_incorrect_conf')\n try:\n data = conn.GetData(fields=fields, where=where)\n except Exception:\n import traceback\n traceback.print_exc()\n return None\n nPts = len(data)\n if not nPts:\n sys.stderr.write('no runs found\\n')\n return None\n overall = numpy.zeros(nPts, numpy.float)\n overallEnrich = numpy.zeros(nPts, numpy.float)\n oCorConf = 0.0\n oInCorConf = 0.0\n holdout = numpy.zeros(nPts, numpy.float)\n holdoutEnrich = numpy.zeros(nPts, numpy.float)\n hCorConf = 0.0\n hInCorConf = 0.0\n overallMatrix = None\n holdoutMatrix = None\n for i in range(nPts):\n if data[i][0] is not None:\n overall[i] = data[i][0]\n oCorConf += data[i][4]\n oInCorConf += data[i][5]\n if data[i][1] is not None:\n holdout[i] = data[i][1]\n haveHoldout = 1\n else:\n haveHoldout = 0\n tmpOverall = 1. * eval(data[i][2])\n if enrich >= 0:\n overallEnrich[i] = ScreenComposite.CalcEnrichment(tmpOverall, tgt=enrich)\n if haveHoldout:\n tmpHoldout = 1. 
* eval(data[i][3])\n if enrich >= 0:\n holdoutEnrich[i] = ScreenComposite.CalcEnrichment(tmpHoldout, tgt=enrich)\n if overallMatrix is None:\n if data[i][2] is not None:\n overallMatrix = tmpOverall\n if haveHoldout and data[i][3] is not None:\n holdoutMatrix = tmpHoldout\n else:\n overallMatrix += tmpOverall\n if haveHoldout:\n holdoutMatrix += tmpHoldout\n if haveHoldout:\n hCorConf += data[i][6]\n hInCorConf += data[i][7]\n\n avgOverall = sum(overall) / nPts\n oCorConf /= nPts\n oInCorConf /= nPts\n overallMatrix /= nPts\n oSort = numpy.argsort(overall)\n oMin = overall[oSort[0]]\n overall -= avgOverall\n devOverall = numpy.sqrt(sum(overall**2) / (nPts - 1))\n res = {}\n res['oAvg'] = 100 * avgOverall\n res['oDev'] = 100 * devOverall\n res['oCorrectConf'] = 100 * oCorConf\n res['oIncorrectConf'] = 100 * oInCorConf\n res['oResultMat'] = overallMatrix\n res['oBestIdx'] = oSort[0]\n res['oBestErr'] = 100 * oMin\n\n if enrich >= 0:\n mean, dev = Stats.MeanAndDev(overallEnrich)\n res['oAvgEnrich'] = mean\n res['oDevEnrich'] = dev\n\n if haveHoldout:\n avgHoldout = sum(holdout) / nPts\n hCorConf /= nPts\n hInCorConf /= nPts\n holdoutMatrix /= nPts\n hSort = numpy.argsort(holdout)\n hMin = holdout[hSort[0]]\n holdout -= avgHoldout\n devHoldout = numpy.sqrt(sum(holdout**2) / (nPts - 1))\n res['hAvg'] = 100 * avgHoldout\n res['hDev'] = 100 * devHoldout\n res['hCorrectConf'] = 100 * hCorConf\n res['hIncorrectConf'] = 100 * hInCorConf\n res['hResultMat'] = holdoutMatrix\n res['hBestIdx'] = hSort[0]\n res['hBestErr'] = 100 * hMin\n if enrich >= 0:\n mean, dev = Stats.MeanAndDev(holdoutEnrich)\n res['hAvgEnrich'] = mean\n res['hDevEnrich'] = dev\n return res\n\n\ndef ShowStats(statD, enrich=1):\n statD = statD.copy()\n statD['oBestIdx'] = statD['oBestIdx'] + 1\n txt = \"\"\"\n# Error Statistics:\n\\tOverall: %(oAvg)6.3f%% (%(oDev)6.3f) %(oCorrectConf)4.1f/%(oIncorrectConf)4.1f\n\\t\\tBest: %(oBestIdx)d %(oBestErr)6.3f%%\"\"\" % (statD)\n if 'hAvg' in statD:\n statD['hBestIdx'] = statD['hBestIdx'] + 1\n txt += \"\"\"\n\\tHoldout: %(hAvg)6.3f%% (%(hDev)6.3f) %(hCorrectConf)4.1f/%(hIncorrectConf)4.1f\n\\t\\tBest: %(hBestIdx)d %(hBestErr)6.3f%%\n \"\"\" % (statD)\n print(txt)\n print()\n print('# Results matrices:')\n print('\\tOverall:')\n tmp = numpy.transpose(statD['oResultMat'])\n colCounts = sum(tmp)\n rowCounts = sum(tmp, 1)\n for i in range(len(tmp)):\n if rowCounts[i] == 0:\n rowCounts[i] = 1\n row = tmp[i]\n print('\\t\\t', end='')\n for j in range(len(row)):\n print('% 6.2f' % row[j], end='')\n print('\\t| % 4.2f' % (100. * tmp[i, i] / rowCounts[i]))\n print('\\t\\t', end='')\n for i in range(len(tmp)):\n print('------', end='')\n print()\n print('\\t\\t', end='')\n for i in range(len(tmp)):\n if colCounts[i] == 0:\n colCounts[i] = 1\n print('% 6.2f' % (100. * tmp[i, i] / colCounts[i]), end='')\n print()\n if enrich > -1 and 'oAvgEnrich' in statD:\n print('\\t\\tEnrich(%d): %.3f (%.3f)' % (enrich, statD['oAvgEnrich'], statD['oDevEnrich']))\n\n if 'hResultMat' in statD:\n print('\\tHoldout:')\n tmp = numpy.transpose(statD['hResultMat'])\n colCounts = sum(tmp)\n rowCounts = sum(tmp, 1)\n for i in range(len(tmp)):\n if rowCounts[i] == 0:\n rowCounts[i] = 1\n row = tmp[i]\n print('\\t\\t', end='')\n for j in range(len(row)):\n print('% 6.2f' % row[j], end='')\n print('\\t| % 4.2f' % (100. 
* tmp[i, i] / rowCounts[i]))\n print('\\t\\t', end='')\n for i in range(len(tmp)):\n print('------', end='')\n print()\n print('\\t\\t', end='')\n for i in range(len(tmp)):\n if colCounts[i] == 0:\n colCounts[i] = 1\n print('% 6.2f' % (100. * tmp[i, i] / colCounts[i]), end='')\n print()\n if enrich > -1 and 'hAvgEnrich' in statD:\n print('\\t\\tEnrich(%d): %.3f (%.3f)' % (enrich, statD['hAvgEnrich'], statD['hDevEnrich']))\n\n return\n\n\ndef Usage():\n print(__doc__)\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n import getopt\n try:\n args, extras = getopt.getopt(sys.argv[1:], 'n:d:N:vX', ('skip',\n 'enrich=', ))\n except Exception:\n Usage()\n\n count = 3\n db = None\n note = ''\n verbose = 0\n skip = 0\n enrich = 1\n for arg, val in args:\n if arg == '-n':\n count = int(val) + 1\n elif arg == '-d':\n db = val\n elif arg == '-N':\n note = val\n elif arg == '-v':\n verbose = 1\n elif arg == '--skip':\n skip = 1\n elif arg == '--enrich':\n enrich = int(val)\n composites = []\n if db is None:\n for arg in extras:\n composite = cPickle.load(open(arg, 'rb'))\n composites.append(composite)\n else:\n tbl = extras[0]\n conn = DbConnect(db, tbl)\n if note:\n where = \"where note='%s'\" % (note)\n else:\n where = ''\n if not skip:\n pkls = conn.GetData(fields='model', where=where)\n composites = []\n for pkl in pkls:\n pkl = str(pkl[0])\n comp = cPickle.loads(pkl)\n composites.append(comp)\n\n if len(composites):\n ProcessIt(composites, count, verbose=verbose)\n elif not skip:\n print('ERROR: no composite models found')\n sys.exit(-1)\n\n if db:\n res = ErrorStats(conn, where, enrich=enrich)\n if res:\n ShowStats(res)\n" ]
[ [ "numpy.argsort", "numpy.zeros", "numpy.transpose" ] ]
torebutlin/pydvma
[ "20e941b0834cbf034d5c7002a3862d4ca335ba12" ]
[ "pydvma/oscilloscope.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 3 11:27:29 2018\n\n@authors: ae407, tb267\n\"\"\" \nimport sys\n\nfrom . import options\nfrom . import file\nfrom . import datastructure\nfrom . import streams\n\nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport time\nimport datetime\n\n\nclass Oscilloscope():\n def __init__(self, settings):\n '''Creates an Oscilloscope\n Args:\n settings: An object of the class MySettings\n '''\n\n self.settings = settings\n \n streams.start_stream(settings)\n self.rec = streams.REC\n \n \n# self.rec = streams.Rec_NI(settings).rec\n# self.rec.init_stream(settings)\n \n# if streams.rec_NI is None:\n# streams.rec_NI = streams.Recorder_NI(settings)\n# streams.rec_NI.init_stream(settings)\n# else:\n# try:\n# streams.rec_NI.end_stream()\n# except:\n# pass\n# streams.rec_NI = None\n# streams.rec_NI = streams.Recorder_NI(settings)\n# streams.rec_NI.init_stream(settings)\n# self.rec = streams.rec_NI\n \n \n\n self.timer = QtCore.QTimer()\n self.create_figure()\n\n self.win.sigKeyPress.connect(self.keyPressed)\n self.win.sigClose.connect(self.on_close)\n\n # Start the update timer\n self.timer.timeout.connect(self.update) # update figure and buffer\n self.timer.start(60)\n\n def create_figure(self):\n '''\n Creates a figure which is an object of the class KeyPressWindow.\n\n '''\n pg.setConfigOption('background', 'w')\n self.win = KeyPressWindow()\n self.win.setWindowIcon(QtGui.QIcon('icon.png'))\n# window_geometry = self.win.geometry()\n self.win.setGeometry(100,100,800,600)\n self.win.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\n \n # This ensures the window appears at front.\n self.win.showMinimized()\n self.win.showNormal()\n \n\n self.win.setWindowTitle(\"Oscilloscope ('s': save new, 'space': autosave, 'p': pause, 'a': always top, 'y': autoscale)\")\n self.view_time = self.settings.init_view_time\n self.view_freq = self.settings.init_view_freq\n self.view_levels = self.settings.init_view_levels\n\n self.toggle_view()\n \n self.auto_scale = False\n\n self.data_saved_counter = 0 # to indicate not yet saved file\n\n def on_close(self, evt):\n self.timer.stop()\n self.rec.end_stream()\n\n def toggle_view(self):\n '''\n Switches between views, triggered by keypress\n '''\n self.win.clear()\n\n if self.view_time:\n self.time_plot()\n\n if self.view_freq:\n self.freq_plot()\n\n if self.view_levels:\n self.levels_plot()\n\n def time_plot(self):\n # create a plot for the time domain\n self.view_time = True\n self.win.nextRow()\n self.osc_time_line = self.win.addPlot(title=\"Time Domain (toggle with 'T')\")\n\n if self.settings.channels == 1:\n self.osc_time_line.enableAutoRange()\n else:\n # Stack the channels -- channel 0 is centred on 0, channel 1\n # centred on 1 etc.\n self.osc_time_line.setYRange(-1,self.settings.channels)\n\n self.osc_time_line.setXRange(self.rec.osc_time_axis[0],\n self.rec.osc_time_axis[-1])\n self.osc_time_line.showGrid(True, True)\n self.osc_time_line.addLegend()\n self.osc_time_line.setLabel('left', 'Normalised Amplitude')\n self.osc_time_line.setLabel('bottom', 'Time (s)')\n\n ax = self.osc_time_line.getAxis('left')\n ax.setTickSpacing(1, 1)\n\n self.osc_time_lineset = {}\n for i in range(self.settings.channels):\n pen_ = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:])\n self.osc_time_lineset[i] = self.osc_time_line.plot(\n pen=pen_, name='Channel %d' % i)\n\n# self.win.FillBetweenItem(curve1=osc_time_lineset[0], curve2=osc_time_lineset[1])\n\n def freq_plot(self):\n # 
create a plot for the frequency domain\n self.view_freq = True\n self.win.nextRow()\n self.osc_freq_line = self.win.addPlot(\n title=\"Frequency Domain (toggle with 'F')\")\n self.osc_freq_line.enableAutoRange()\n self.osc_freq_line.setXRange(self.rec.osc_freq_axis[0],\n self.rec.osc_freq_axis[-1])\n self.osc_freq_line.showGrid(True, True)\n self.osc_freq_line.addLegend()\n self.osc_freq_line.setLabel('left', 'Power Spectrum (dB)')\n self.osc_freq_line.setLabel('bottom', 'Frequency (Hz)')\n\n self.osc_freq_lineset = {}\n for i in range(self.settings.channels):\n pen_ = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:])\n self.osc_freq_lineset[i] = self.osc_freq_line.plot(\n pen=pen_, name='Channel %d' % i)\n\n def levels_plot(self):\n # create a plot for the frequency domain\n self.view_levels = True\n self.win.nextRow()\n self.osc_levels_line = self.win.addPlot(title=\"Channel Levels (toggle with 'L')\")\n self.osc_levels_line.setYRange(0, 1)\n self.osc_levels_line.setXRange(-0.5, self.settings.channels - 0.5)\n self.osc_levels_line.showGrid(False, True)\n self.osc_levels_line.setLabel('left', 'Normalised Amplitude')\n self.osc_levels_line.setLabel('bottom', 'Channel Index')\n\n ax = self.osc_levels_line.getAxis('bottom')\n ax.setTickSpacing(1, 1)\n# ax.showLabel(show=True)\n# self.osc_levels_line.setTicks(np.arange(self.settings.channels))\n self.osc_levels_lineset={}\n for i in range(self.settings.channels):\n pen_ = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:],width=3)\n pen_peak = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:],width=3)\n self.osc_levels_lineset[i]=self.osc_levels_line.plot(pen=pen_, name='vertical')\n self.osc_levels_lineset[self.settings.channels+i]=self.osc_levels_line.plot(pen=pen_, name='top')\n self.osc_levels_lineset[2*self.settings.channels+i]=self.osc_levels_line.plot(pen=pen_peak, name='peak hold')\n# self.osc_levels_lineset[3]=self.osc_levels_line.plot(pen=pen_, name='Channel')\n\n self.osc_levels_peak_hold = np.zeros(self.settings.channels)\n self.time_last_changed = np.zeros(self.settings.channels)\n\n def update(self):\n '''\n Updates plots with incoming data from __call__.\n Called with a 0s interval by QTimer.\n\n '''\n time_data_snapshot = np.copy(self.rec.osc_time_data)\n if self.view_levels == True:\n self.osc_levels_rms = np.sqrt(np.mean(time_data_snapshot**2,axis=0))\n self.osc_levels_max = np.max(np.abs(time_data_snapshot),axis=0)\n changed_indices = self.osc_levels_peak_hold < self.osc_levels_max\n self.time_last_changed[changed_indices] = time.time()\n self.osc_levels_peak_hold = np.maximum(self.osc_levels_peak_hold,self.osc_levels_max)\n self.osc_levels_peak_hold[time.time()-self.time_last_changed>2] = 0\n\n for i in range(self.settings.channels):\n offset = i\n if self.view_time == True:\n if self.auto_scale is True:\n shift = np.mean(time_data_snapshot[:,i])\n scale_factor = np.max(np.abs(time_data_snapshot[:,i]-shift))*2\n else:\n shift = 0\n scale_factor = 1\n \n self.osc_time_lineset[i].setData(self.rec.osc_time_axis, (time_data_snapshot[:,i]-shift)/scale_factor + offset)\n\n if self.view_freq == True:\n # calculate the FFT\n self.rec.osc_time_data_windowed[:,i] = time_data_snapshot[:,i] * np.blackman(np.shape(time_data_snapshot)[0])\n self.rec.osc_freq_data[:,i] = 20 * np.log10(np.abs(np.fft.rfft(self.rec.osc_time_data_windowed[:,i]))/len(self.rec.osc_time_data_windowed[:,i]))\n self.osc_freq_lineset[i].setData(self.rec.osc_freq_axis,self.rec.osc_freq_data[:,i])\n\n if 
self.view_levels == True:\n self.osc_levels_lineset[i].setData([i,i],[0,self.osc_levels_max[i]])\n self.osc_levels_lineset[self.settings.channels+i].setData([i-0.3,i+0.3],self.osc_levels_max[i]*np.ones(2))\n\n if self.osc_levels_peak_hold[i] > 0.98:\n pen_peak = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:],width=10)\n else:\n pen_peak = pg.mkPen(color=options.set_plot_colours(self.settings.channels)[i,:],width=3)\n# self.osc_levels_lineset[2*self.settings.channels+i]=self.osc_levels_line.plot(pen=pen_peak, name='peak hold')\n self.osc_levels_lineset[2*self.settings.channels+i].setData([i-0.3,i+0.3],self.osc_levels_peak_hold[i]*np.ones(2),pen=pen_peak)\n# self.osc_levels_lineset[3].setData(np.arange(2),np.ones(2))\n\n\n# #updates for the stored - DONT NEED\n# self.rec.stored_time_data_windowed[:,i] = self.rec.stored_time_data[:,i] * np.blackman(np.shape(self.rec.stored_time_data)[0])\n# self.rec.stored_freq_data[:,i] = 20 * np.log10(np.abs(np.fft.rfft(self.rec.stored_time_data_windowed[:,i]))/len(self.rec.stored_time_data_windowed[:,i]))\n\n #KeyPressed function within osciolloscpe since can only take one argument\n def keyPressed(self, evt):\n '''\n Upon a Space Bar press, makes a copy of data from the past stored_time seconds,plots it in Bokeh and gives the user an option to save it.\n '''\n\n if evt.key() == QtCore.Qt.Key_T:\n\n if self.view_freq != False or self.view_levels != False:\n# print('toggled time domain view')\n self.view_time = not self.view_time\n self.toggle_view()\n# else:\n# print('toggling all views off is prevented')\n\n if evt.key() == QtCore.Qt.Key_F:\n\n if self.view_time != False or self.view_levels != False:\n# print('toggled frequency domain view')\n self.view_freq = not self.view_freq\n self.toggle_view()\n# else:\n# print('toggling all views off is prevented')\n\n if evt.key() == QtCore.Qt.Key_L:\n if self.view_time != False or self.view_freq != False:\n# print('toggled levels view')\n self.view_levels = not self.view_levels\n self.toggle_view()\n# else:\n# print('toggling all views off is prevented')\n \n if evt.key() == QtCore.Qt.Key_P:\n if self.timer.isActive():\n self.timer.stop()\n else:\n self.timer.start()\n \n if evt.key() == QtCore.Qt.Key_A:\n self.win.setWindowFlags(self.win.windowFlags() ^ QtCore.Qt.WindowStaysOnTopHint) \n self.win.show()\n \n \n if evt.key() == QtCore.Qt.Key_Y:\n self.auto_scale = not self.auto_scale \n \n if evt.key() == QtCore.Qt.Key_Space or evt.key() == QtCore.Qt.Key_S:\n\n stored_time_data_copy=np.copy(self.rec.stored_time_data)\n t = datetime.datetime.now()\n timestring = '_'+str(t.year)+'_'+str(t.month)+'_'+str(t.day)+'_at_'+str(t.hour)+'_'+str(t.minute)+'_'+str(t.second)\n# print(\"key press trigger: saving data to file in working directory\")\n\n ### make into dataset\n \n fs=self.settings.fs\n n_samp=len(stored_time_data_copy[:,0])\n dt=1/fs\n t_axis= np.arange(n_samp)*dt\n\n \n timedata = datastructure.TimeData(t_axis,stored_time_data_copy,self.settings,timestamp=t,timestring=timestring,test_name='Test_{}'.format(self.data_saved_counter))\n \n dataset = datastructure.DataSet()\n dataset.add_to_dataset(timedata)\n \n \n if evt.key() == QtCore.Qt.Key_S:\n self.data_saved_counter = 1\n self.last_filename = file.save_data(dataset)\n \n# # this version saves all data as new timedata objects within one file\n# if evt.key() == QtCore.Qt.Key_Space:\n# if self.data_saved_counter == 0:\n# self.last_filename = file.save_data(dataset)\n# if self.last_filename == '':\n# self.data_saved_counter = 0\n# else:\n# 
self.data_saved_counter += 1\n# \n# else:\n# d = file.load_data(self.last_filename)\n# d.add_to_dataset(timedata)\n# file.save_data(d,self.last_filename,overwrite_without_prompt=True)\n# self.data_saved_counter += 1\n \n # this version saves each new dataset to new file\n if evt.key() == QtCore.Qt.Key_Space:\n if self.data_saved_counter == 0:\n self.last_filename = file.save_data(dataset)\n if self.last_filename == '':\n self.data_saved_counter = 0\n else:\n self.data_saved_counter += 1\n \n else:\n d = datastructure.DataSet()\n d.add_to_dataset(timedata)\n filename = self.last_filename.replace('.npy','_'+str(self.data_saved_counter)+'.npy')\n file.save_data(d,filename,overwrite_without_prompt=True)\n self.data_saved_counter += 1\n\nclass KeyPressWindow(pg.GraphicsWindow):\n '''\n A subclass of pyQtGraph GraphicsWindow that emits a signal when a key is pressed.\n\n '''\n sigKeyPress = QtCore.pyqtSignal(object)\n sigClose = QtCore.pyqtSignal(object)\n\n def __init__(self, *args, **kwargs):\n '''\n Re-implmented from parent.\n '''\n super().__init__(*args, **kwargs)\n\n def keyPressEvent(self, evt):\n '''\n Emits a signal upon a key press\n '''\n self.scene().keyPressEvent(evt)\n self.sigKeyPress.emit(evt)\n\n def closeEvent(self, evt):\n '''\n Emits a signal when the window is closed.\n '''\n self.sigClose.emit(evt)\n self.close()\n" ]
[ [ "numpy.maximum", "numpy.abs", "numpy.fft.rfft", "numpy.arange", "numpy.ones", "numpy.copy", "numpy.mean", "numpy.shape", "numpy.zeros" ] ]
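The frequency-domain trace in the oscilloscope above is computed as 20*log10(|rfft(window * x)| / N) with a Blackman window. That estimate can be reproduced on a synthetic tone with NumPy alone, which is a handy way to sanity-check the dB levels independently of the streaming code; the sampling rate and tone frequency below are arbitrary choices for the sketch.

import numpy as np

fs = 44100                                   # arbitrary sampling rate
t = np.arange(4096) / fs
x = 0.5 * np.sin(2 * np.pi * 1000 * t)       # 1 kHz tone, amplitude 0.5

window = np.blackman(len(x))
spectrum_db = 20 * np.log10(np.abs(np.fft.rfft(window * x)) / len(x))
freq_axis = np.fft.rfftfreq(len(x), d=1 / fs)

peak = freq_axis[np.argmax(spectrum_db)]
print(f"peak near {peak:.0f} Hz, level {spectrum_db.max():.1f} dB")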
ladsantos/phoenix_pipeline
[ "0befa45e0838a0aeb58efb235a871604919a9755" ]
[ "wavelength_soln.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize as mz\nfrom scipy.optimize import curve_fit as cft\nfrom matplotlib.widgets import TextBox\nfrom matplotlib.widgets import SpanSelector\nfrom matplotlib.widgets import Button\nimport os\nimport utils as utl\n\ndef wave_soln(path, fname):\n \"\"\"\n Parameters:\n -----------\n path : str\n path of the input file\n fname : str\n name of the input telluric file.\n Ideally this should be the file from which cosmic rays \n are removed. 3 columns, pixel, flux, error in flux\n -----------\n returns\n -----------\n popt : array like\n optimized values of parameters of linear fit between pixel \n space and wavelength space.\n pcov : 2x2 matrix\n covariance matrix of optimized values\n -----------\n \"\"\"\n fig, ax = plt.subplots(figsize=(16,12))\n fig.subplots_adjust(bottom=0.2)\n\n pix, fl, fle = np.loadtxt(path + fname, usecols=(0,1,2), unpack=True)\n plts = ax.plot(pix, fl)\n ax.set_title('Press the left mouse button to select the region where the line is.')\n \n new_data = [] # Wavelength data provided by user\n indices = []\n\n # Dummy variables to save data\n new_data1 = []\n indices1 = []\n\n def onselect(pmin, pmax):\n global indices1\n indmin, indmax = np.searchsorted(pix, (pmin, pmax))\n indmax = min(len(pix) - 1, indmax)\n\n ab = [indmin, indmax]\n indices1 = []\n indices1.append(ab)\n\n def submit(expression):\n global new_data1\n ydata = expression\n new_data1 = []\n new_data1.append(ydata)\n\n def enter(self):\n global new_data1, indices1\n new_data.append(new_data1[0])\n indices.append(indices1[0])\n text_box1.set_val(\"\")\n\n # To select a region\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red'))\n\n # Adding a box to enter values\n axbox1 = fig.add_axes([0.2, 0.05, 0.4, 0.075])\n text_box1 = TextBox(axbox1, \"Enter the corresponding\\n wavelength here (in Angstrom)\\n Then press Calibrate\")\n text_box1.on_submit(submit)\n #text_box1.stop_typing()\n\n # Enter button\n axenter = plt.axes([0.7, 0.05, 0.1, 0.075])\n bnext = Button(axenter, 'Calibrate')\n bnext.on_clicked(enter)\n\n plt.show()\n\n # new_data has values of mid-wavelength of lines in Angstrom\n # indices has indices of the starting and ending point of lines\n # \n # We want to fit a Gaussian to pixel-flux (with flux-error) data\n # to find out mid-wavelength. And then assign this wavelength (in\n # pixel space) to user provided wavelength stored in new_data.\n # \n # mid_pix stores the value of pixel at mid-wavelength in wavelength space\n\n mid_pix = []\n\n for i in range(len(indices)):\n aa = int(indices[i][0])\n bb = int(indices[i][1])\n pix1 = pix[aa:bb]\n fl1 = fl[aa:bb]\n fle1 = fle[aa:bb]\n xinit = np.array([(pix1[0] + pix1[-1])/2, 1, 1, 1])\n def min_log_likelihood(x):\n model = utl.neg_gaus(pix1, x[0], x[1], x[2], x[3])\n chi2 = (fl1 - model)/fle1\n chi22 = np.sum(chi2**2)\n yy = np.sum(np.log(fle1)) + 0.5*chi22\n return yy\n soln = mz(min_log_likelihood, xinit, method='L-BFGS-B')\n mid_pix.append(soln.x[0])\n\n mid_wave_pix = np.asarray(mid_pix)\n mid_wave_lam = np.asarray(new_data)\n\n popt, pcov = cft(utl.line, mid_wave_pix, mid_wave_lam)\n\n return popt, pcov" ]
[ [ "numpy.log", "matplotlib.widgets.Button", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.pyplot.axes", "matplotlib.widgets.TextBox", "scipy.optimize.minimize", "numpy.searchsorted", "numpy.array", "matplotlib.pyplot.show", "scipy.optimize.curve_fit", "numpy.sum", "numpy.loadtxt" ] ]
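wave_soln above locates each line centre by minimising a negative log-likelihood built around utl.neg_gaus, whose exact signature is not shown in this file. The sketch below performs the analogous fit on synthetic data with scipy.optimize.curve_fit, assuming a simple 'continuum minus Gaussian dip' model; the model form and parameter names are assumptions made for illustration only.

import numpy as np
from scipy.optimize import curve_fit

def neg_gaussian(x, centre, depth, sigma, continuum):
    # Assumed absorption-line model: a flat continuum with a Gaussian dip.
    return continuum - depth * np.exp(-(x - centre) ** 2 / (2 * sigma ** 2))

rng = np.random.default_rng(42)
pix = np.arange(400.0, 460.0)
flux = neg_gaussian(pix, 431.7, 80.0, 2.5, 1000.0) + rng.normal(0, 5, size=pix.size)

p0 = [(pix[0] + pix[-1]) / 2, 50.0, 3.0, flux.max()]
popt, pcov = curve_fit(neg_gaussian, pix, flux, p0=p0)
print(f"fitted line centre: {popt[0]:.2f} pixels")   # close to 431.7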
qnl/Qcodes
[ "ea2c5188f04828c6a76c9cfd9a66509277d7c09f" ]
[ "qcodes/instrument_drivers/stahl/stahl.py" ]
[ "\"\"\"\nThis is a driver for the Stahl power supplies\n\"\"\"\n\nfrom typing import Dict, Optional, Any, Callable, Iterable\nimport re\nimport numpy as np\nimport logging\nfrom collections import OrderedDict\nfrom functools import partial\n\nfrom qcodes import VisaInstrument, InstrumentChannel, ChannelList\nfrom qcodes.utils.validators import Numbers\n\nlogger = logging.getLogger()\n\n\ndef chain(*functions: Callable) -> Callable:\n \"\"\"\n The output of the first callable is piped to the input of the second, etc.\n\n Example:\n >>> def f():\n >>> return \"1.2\"\n >>> chain(f, float)() # return 1.2 as float\n \"\"\"\n\n def make_iter(args):\n if not isinstance(args, Iterable) or isinstance(args, str):\n return args,\n return args\n\n def inner(*args):\n result = args\n for fun in functions:\n new_args = make_iter(result)\n result = fun(*new_args)\n\n return result\n\n return inner\n\n\nclass StahlChannel(InstrumentChannel):\n \"\"\"\n A Stahl source channel\n\n Args:\n parent\n name\n channel_number\n \"\"\"\n\n acknowledge_reply = chr(6)\n\n def __init__(self, parent: VisaInstrument, name: str, channel_number: int):\n super().__init__(parent, name)\n\n self._channel_string = f\"{channel_number:02d}\"\n self._channel_number = channel_number\n # TODO: fix the parser..\n self.add_parameter(\n \"voltage\",\n get_cmd=f\"{self.parent.identifier} Q{self._channel_string}\",\n # get_parser=chain(\n # re.compile(r\" mV$\").findall,\n # partial(re.sub, \",\", \".\"),\n # float\n # ),\n set_cmd=self._set_voltage,\n unit=\"V\",\n vals=Numbers(\n -self.parent.voltage_range,\n self.parent.voltage_range\n )\n )\n\n self.add_parameter(\n \"current\",\n get_cmd=f\"{self.parent.identifier} I{self._channel_string}\",\n get_parser=chain(\n re.compile(r\"^([+\\-]\\d+,\\d+) mA$\").findall,\n partial(re.sub, \",\", \".\"),\n lambda ma: float(ma) / 1000 # Convert mA to A\n ),\n unit=\"A\",\n )\n\n self.add_parameter(\n \"is_locked\",\n get_cmd=self._get_lock_status\n )\n\n def _set_voltage(self, voltage: float) -> None:\n \"\"\"\n Args:\n voltage\n \"\"\"\n # Normalize the voltage in the range 0 to 1, where 0 is maximum negative\n # voltage and 1 is maximum positive voltage\n voltage_normalized = np.interp(\n voltage,\n self.parent.voltage_range * np.array([-1, 1]),\n [0, 1]\n )\n\n send_string = f\"{self.parent.identifier} CH{self._channel_string} \" \\\n f\"{voltage_normalized:.5f}\"\n response = self.ask(send_string)\n\n if response != self.acknowledge_reply:\n self.log.warning(\n f\"Command {send_string} did not produce an acknowledge reply\")\n\n def _get_lock_status(self) -> bool:\n \"\"\"\n A lock occurs when an output is overloaded\n\n Return:\n lock_status: True when locked\n \"\"\"\n send_string = f\"{self.parent.identifier} LOCK\"\n\n response = self.parent.visa_handle.query_binary_values(\n send_string,\n datatype='B',\n header_fmt=\"empty\"\n )\n\n channel_index = self._channel_number - 1\n channel_group = channel_index // 4\n lock_code_group = response[channel_group]\n return format(lock_code_group, \"b\")[channel_index % 4 + 1] == \"1\"\n\n\nclass Stahl(VisaInstrument):\n \"\"\"\n Stahl driver.\n\n Args:\n name\n address: A serial port address\n \"\"\"\n\n def __init__(self, name: str, address: str, **kwargs):\n super().__init__(name, address, terminator=\"\\r\", **kwargs)\n self.visa_handle.baud_rate = 115200\n\n instrument_info = self.parse_idn_string(\n self.ask(\"IDN\")\n )\n\n for key, value in instrument_info.items():\n setattr(self, key, value)\n\n channels = ChannelList(\n self, 
\"channel\", StahlChannel, snapshotable=False\n )\n\n for channel_number in range(1, self.n_channels + 1):\n name = f\"channel{channel_number}\"\n channel = StahlChannel(\n self,\n name,\n channel_number\n )\n self.add_submodule(name, channel)\n channels.append(channel)\n\n self.add_submodule(\"channel\", channels)\n\n self.add_parameter(\n \"temperature\",\n get_cmd=f\"{self.identifier} TEMP\",\n get_parser=chain(\n re.compile(\"^TEMP (.*)°C$\").findall,\n float\n ),\n unit=\"C\"\n )\n\n self.connect_message()\n\n def ask_raw(self, cmd: str) -> str:\n \"\"\"\n Sometimes the instrument returns non-ascii characters in response\n strings manually adjust the encoding to latin-1\n \"\"\"\n self.visa_log.debug(f\"Querying: {cmd}\")\n self.visa_handle.write(cmd)\n response = self.visa_handle.read(encoding=\"latin-1\")\n self.visa_log.debug(f\"Response: {response}\")\n return response\n\n @staticmethod\n def parse_idn_string(idn_string) -> Dict[str, Any]:\n \"\"\"\n Return:\n dict: The dict contains the following keys \"model\",\n \"serial_number\", \"voltage_range\",\"n_channels\", \"output_type\"\n \"\"\"\n result = re.search(\n r\"(HV|BS)(\\d{3}) (\\d{3}) (\\d{2}) ([buqsm])\",\n idn_string\n )\n\n if result is None:\n raise RuntimeError(\n \"Unexpected instrument response. Perhaps the model of the \"\n \"instrument does not match the drivers expectation or a \"\n \"firmware upgrade has taken place. Please get in touch \"\n \"with a QCoDeS core developer\"\n )\n\n converters: Dict[str, Callable] = OrderedDict({\n \"model\": str,\n \"serial_number\": str,\n \"voltage_range\": float,\n \"n_channels\": int,\n \"output_type\": {\n \"b\": \"bipolar\",\n \"u\": \"unipolar\",\n \"q\": \"quadrupole\",\n \"s\": \"steerer\",\n \"m\": \"bipolar milivolt\"\n }.get\n })\n\n return {\n name: converter(value)\n for (name, converter), value in zip(converters.items(), result.groups())\n }\n\n def get_idn(self) -> Dict[str, Optional[str]]:\n \"\"\"\n The Stahl sends a uncommon IDN string which does not include a\n firmware version.\n \"\"\"\n return {\n \"vendor\": \"Stahl\",\n \"model\": self.model,\n \"serial\": self.serial_number,\n \"firmware\": None\n }\n\n @property\n def identifier(self):\n return f\"{self.model}{self.serial_number}\"\n\n def __del__(self):\n pass\n" ]
[ [ "numpy.array" ] ]
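The chain helper defined at the top of the Stahl driver above is self-contained, so its behaviour is easy to exercise outside QCoDeS. The snippet below restates it and pipes a current reading through the same three steps used by the driver's current parameter; the input string '+12,5 mA' is a made-up example in the format the driver's regex suggests.

import re
from collections.abc import Iterable
from functools import partial

def chain(*functions):
    # Pipe the output of each callable into the input of the next one.
    def make_iter(args):
        if not isinstance(args, Iterable) or isinstance(args, str):
            return args,
        return args

    def inner(*args):
        result = args
        for fun in functions:
            result = fun(*make_iter(result))
        return result

    return inner

parse_current = chain(
    re.compile(r"^([+\-]\d+,\d+) mA$").findall,   # extract "+12,5" from "+12,5 mA"
    partial(re.sub, ",", "."),                    # decimal comma -> decimal point
    lambda ma: float(ma) / 1000,                  # mA -> A
)

print(parse_current("+12,5 mA"))   # 0.0125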
yhr20000319/Informer2020
[ "178d0dc3da54261f381023f9c25e619e2b62911c" ]
[ "data/data_loader.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n# from sklearn.preprocessing import StandardScaler\n\nfrom utils.tools import StandardScaler\nfrom utils.timefeatures import time_features\n\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom sklearn import preprocessing\nclass Dataset_ETT_hour(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24*4*4\n self.label_len = 24*4\n self.pred_len = 24*4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train':0, 'val':1, 'test':2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]\n border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features=='M' or self.features=='MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features=='S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)\n else:\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len- self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)\n\nclass Dataset_ETT_minute(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTm1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24*4*4\n self.label_len = 24*4\n self.pred_len = 24*4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train':0, 'val':1, 'test':2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n 
self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4+4*30*24*4 - self.seq_len]\n border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features=='M' or self.features=='MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features=='S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)\n else:\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)\n\n\nclass Dataset_Custom(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24*4*4\n self.label_len = 24*4\n self.pred_len = 24*4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train':0, 'val':1, 'test':2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n self.timeenc = timeenc\n self.freq = freq\n self.cols=cols\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = preprocessing.StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n '''\n df_raw.columns: ['date', ...(other features), target feature]\n '''\n cols = list(df_raw.columns);\n if self.cols:\n cols=self.cols.copy()\n cols.remove(self.target)\n else:\n cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')\n df_raw = df_raw[['date']+cols+[self.target]]\n\n num_train = int(len(df_raw)*0.7)\n num_test = int(len(df_raw)*0.2)\n num_vali = len(df_raw) - num_train - num_test\n border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]\n border2s = [num_train, num_train+num_vali, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features=='M' or self.features=='MS':\n cols_data = 
df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features=='S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data)\n data = self.scaler.transform(df_data)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)\n else:\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len- self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)\n\nclass Dataset_Pred(Dataset):\n def __init__(self, root_path, flag='pred', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24*4*4\n self.label_len = 24*4\n self.pred_len = 24*4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['pred']\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n self.timeenc = timeenc\n self.freq = freq\n self.cols=cols\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = preprocessing.StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n '''\n df_raw.columns: ['date', ...(other features), target feature]\n '''\n if self.cols:\n cols=self.cols.copy()\n cols.remove(self.target)\n else:\n cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')\n df_raw = df_raw[['date']+cols+[self.target]]\n\n border1 = len(df_raw)-self.seq_len\n border2 = len(df_raw)\n\n if self.features=='M' or self.features=='MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features=='S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n self.scaler.fit(df_data)\n data = self.scaler.transform(df_data)\n else:\n data = df_data.values\n\n tmp_stamp = df_raw[['date']][border1:border2]\n tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len+1, freq=self.freq)\n\n df_stamp = pd.DataFrame(columns = ['date'])\n df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq[-1:])\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + 
self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = self.data_x[r_begin:r_begin+self.label_len]\n else:\n seq_y = self.data_y[r_begin:r_begin+self.label_len]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "numpy.concatenate", "pandas.date_range", "sklearn.preprocessing.StandardScaler" ] ]
multitel-ai/urban-sound-tagging
[ "a509cd838f4b94484445d175020176971d64cd6c", "a509cd838f4b94484445d175020176971d64cd6c" ]
[ "activation/mish.py", "sub_system2.py" ]
[ "# Code from official repository : https://github.com/digantamisra98/Mish\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\[email protected]\ndef mish(input):\n '''\n Applies the mish function element-wise:\n mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))\n See additional documentation for mish class.\n '''\n return input * torch.tanh(F.softplus(input))\n\nclass Mish(nn.Module):\n '''\n Applies the mish function element-wise:\n mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))\n Shape:\n - Input: (N, *) where * means, any number of additional\n dimensions\n - Output: (N, *), same shape as the input\n Examples:\n >>> m = Mish()\n >>> input = torch.randn(2)\n >>> output = m(input)\n '''\n def __init__(self):\n '''\n Init method.\n '''\n super().__init__()\n\n def forward(self, input):\n '''\n Forward pass of the function.\n '''\n return mish(input)", "import os\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom training_system2 import DCASETALNetClassifier\n\nfrom tqdm import tqdm\nimport config\nfrom prepare_data.sonycust import SONYCUST_TALNet\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n# Arg Parser\nparser = argparse.ArgumentParser()\nparser.add_argument('--output_mode', type=str, default='both', choices=['coarse', 'fine', 'both'])\nparser.add_argument('--path_to_SONYCUST', type=str, default=config.path_to_SONYCUST)\nparser.add_argument('--path_to_ckpt', type=str, default=os.path.join(config.path_to_summaries, 'TALNetV3_TO23_1/checkpoints/epoch=19-auprc_macro_c=0.801.ckpt'))\nparser.add_argument('--path_to_yaml', type=str, default=os.path.join(config.path_to_summaries, 'TALNetV3_TO23_1/lightning_logs/version_14/hparams_wo_es.yaml'))\nparser.add_argument('--metadata', nargs='+', default=[\"latitude\",\"longitude\",\"week\",\"day\",\"hour\"])\nparser.add_argument('--consensus_threshold', type=float, default=0.0)\nparser.add_argument('--one_hot_time', type=bool, default=False)\nargs = parser.parse_args()\n\n# Dataset parameters\ndata_param={'mode':args.output_mode,\n 'metadata':args.metadata,\n 'one_hot_time':args.one_hot_time, \n 'consensus_threshold':args.consensus_threshold}\n \n# Creating dataset\ndataset = SONYCUST_TALNet(args.path_to_SONYCUST, **data_param)\ntrain_dataset, valid_dataset, test_dataset = dataset.train_validation_test_split()\n\ntest_dataloader = DataLoader(test_dataset, batch_size=64, num_workers=4)\n\n# Creating model\nmodel = DCASETALNetClassifier.load_from_checkpoint(args.path_to_ckpt, hparams_file=args.path_to_yaml)\nmodel.freeze()\nmodel.to('cuda:0')\nprint(\"Number of parameters : \", count_parameters(model))\n\nprint(\"Computing outputs...\")\nfor i_batch, sample_batched in enumerate(tqdm(test_dataloader), 1):\n\n filenames = sample_batched['file_name']\n inputs = sample_batched['input_vector'].float().cuda()\n metas = sample_batched['metadata'].float().cuda()\n # forward + eval\n outputs = model(inputs, metas)[0]\n if i_batch == 1:\n filename_array = [] + filenames\n output_array = np.array(outputs.cpu())\n else:\n filename_array += filenames\n output_array = np.vstack((output_array, outputs.cpu().numpy()))\n\nprint(\"Done\")\n\npred_df = pd.DataFrame(columns=['audio_filename']+dataset.idlabel_dict['coarse']+dataset.idlabel_dict['fine'])\npred_df['audio_filename'] = filename_array\npred_df[dataset.idlabel_dict['coarse']+dataset.idlabel_dict['fine']] = 
output_array\npred_df.to_csv(os.path.join(config.path_to_SONYCUST, \"Arnault_MULT_task5_2.output.csv\"), index = False, header=True)\n# pred_test_TALNETV3.csv\n\nprint(\"Saved\")" ]
[ [ "torch.nn.functional.softplus" ], [ "torch.utils.data.DataLoader", "pandas.DataFrame" ] ]
OutlierVentures/Stop-Bluffing
[ "b2aa968cc86c47096b945a3d2cef3813dc5662b4" ]
[ "tools/vis.py" ]
[ "import matplotlib.pyplot as plt\nimport os\nimport progressbar\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef vis_many_face_landmarks(landmarks):\n \"\"\"\n Visualises sequence of facial landmarks\n Saves the figures to a directory\n\n :param landmarks: Shape (t, 68, 3)\n :return:\n \"\"\"\n output_dir = 'tmp'\n print(\"Writing output to {}\".format(output_dir))\n bar = progressbar.ProgressBar()\n for t in bar(range(landmarks.shape[0])):\n fig = vis_face_landmarks(landmarks[t, :, :])\n fig.savefig(os.path.join(output_dir, '{}.png'.format(t)))\n\n\ndef vis_face_landmarks(landmarks):\n fig = plt.figure(figsize=plt.figaspect(.5))\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(landmarks[0:17, 0], landmarks[0:17, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[17:22, 0], landmarks[17:22, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[22:27, 0], landmarks[22:27, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[27:31, 0], landmarks[27:31, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[31:36, 0], landmarks[31:36, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[36:42, 0], landmarks[36:42, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[42:48, 0], landmarks[42:48, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[48:60, 0], landmarks[48:60, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.plot(landmarks[60:68, 0], landmarks[60:68, 1], marker='o', markersize=6, linestyle='-', color='w', lw=2)\n ax.axis('off')\n\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n surf = ax.scatter(landmarks[:, 0] * 1.2, landmarks[:, 1], landmarks[:, 2], c=\"cyan\", alpha=1.0, edgecolor='b')\n # Face outline\n ax.plot3D(landmarks[:17, 0] * 1.2, landmarks[:17, 1], landmarks[:17, 2], color='green')\n ax.plot3D(landmarks[17:22, 0] * 1.2, landmarks[17:22, 1], landmarks[17:22, 2], color='blue')\n ax.plot3D(landmarks[22:27, 0] * 1.2, landmarks[22:27, 1], landmarks[22:27, 2], color='blue')\n ax.plot3D(landmarks[27:31, 0] * 1.2, landmarks[27:31, 1], landmarks[27:31, 2], color='blue')\n ax.plot3D(landmarks[31:36, 0] * 1.2, landmarks[31:36, 1], landmarks[31:36, 2], color='blue')\n # L Eye\n ax.plot3D(landmarks[36:42, 0] * 1.2, landmarks[36:42, 1], landmarks[36:42, 2], color='blue')\n # R Eye\n ax.plot3D(landmarks[42:48, 0] * 1.2, landmarks[42:48, 1], landmarks[42:48, 2], color='blue')\n # Mouth\n ax.plot3D(landmarks[48:, 0] * 1.2, landmarks[48:, 1], landmarks[48:, 2], color='blue')\n\n ax.view_init(elev=90., azim=90.)\n ax.set_xlim(ax.get_xlim()[::-1])\n\n return fig\n\n\ndef plot_histogram(x):\n \"\"\"\n Visualise range distribution\n :param x:\n :return:\n \"\"\"\n x = x.flatten()\n plt.hist(x)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.figaspect", "matplotlib.pyplot.show", "matplotlib.pyplot.hist" ] ]
chezimany/AirSim
[ "19fea1eac55531c5d5d6ebd91c8101f330f3e549" ]
[ "PythonClient/airsim/types.py" ]
[ "from __future__ import print_function\nimport msgpackrpc #install as admin: pip install msgpack-rpc-python\nimport numpy as np #pip install numpy\nimport math\n\nclass MsgpackMixin:\n def __repr__(self):\n from pprint import pformat\n return \"<\" + type(self).__name__ + \"> \" + pformat(vars(self), indent=4, width=1)\n\n def to_msgpack(self, *args, **kwargs):\n return self.__dict__\n\n @classmethod\n def from_msgpack(cls, encoded):\n obj = cls()\n #obj.__dict__ = {k.decode('utf-8'): (from_msgpack(v.__class__, v) if hasattr(v, \"__dict__\") else v) for k, v in encoded.items()}\n obj.__dict__ = { k : (v if not isinstance(v, dict) else getattr(getattr(obj, k).__class__, \"from_msgpack\")(v)) for k, v in encoded.items()}\n #return cls(**msgpack.unpack(encoded))\n return obj\n\nclass _ImageType(type):\n @property\n def Scene(cls):\n return 0\n def DepthPlanar(cls):\n return 1\n def DepthPerspective(cls):\n return 2\n def DepthVis(cls):\n return 3\n def DisparityNormalized(cls):\n return 4\n def Segmentation(cls):\n return 5\n def SurfaceNormals(cls):\n return 6\n def Infrared(cls):\n return 7\n\n def __getattr__(self, key):\n if key == 'DepthPlanner':\n print('\\033[31m'+\"DepthPlanner has been (correctly) renamed to DepthPlanar. Please use ImageType.DepthPlanar instead.\"+'\\033[0m')\n raise AttributeError\n\nclass ImageType(metaclass=_ImageType):\n Scene = 0\n DepthPlanar = 1\n DepthPerspective = 2\n DepthVis = 3\n DisparityNormalized = 4\n Segmentation = 5\n SurfaceNormals = 6\n Infrared = 7\n\nclass DrivetrainType:\n MaxDegreeOfFreedom = 0\n ForwardOnly = 1\n\nclass LandedState:\n Landed = 0\n Flying = 1\n\nclass WeatherParameter:\n Rain = 0\n Roadwetness = 1\n Snow = 2\n RoadSnow = 3\n MapleLeaf = 4\n RoadLeaf = 5\n Dust = 6\n Fog = 7\n Enabled = 8\n\nclass Vector2r(MsgpackMixin):\n x_val = 0.0\n y_val = 0.0\n\n def __init__(self, x_val = 0.0, y_val = 0.0):\n self.x_val = x_val\n self.y_val = y_val\n\nclass Vector3r(MsgpackMixin):\n x_val = 0.0\n y_val = 0.0\n z_val = 0.0\n\n def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0):\n self.x_val = x_val\n self.y_val = y_val\n self.z_val = z_val\n\n @staticmethod\n def nanVector3r():\n return Vector3r(np.nan, np.nan, np.nan)\n\n def containsNan(self):\n return (math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))\n\n def __add__(self, other):\n return Vector3r(self.x_val + other.x_val, self.y_val + other.y_val, self.z_val + other.z_val)\n\n def __sub__(self, other):\n return Vector3r(self.x_val - other.x_val, self.y_val - other.y_val, self.z_val - other.z_val)\n\n def __truediv__(self, other):\n if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:\n return Vector3r( self.x_val / other, self.y_val / other, self.z_val / other)\n else:\n raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )\n\n def __mul__(self, other):\n if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:\n return Vector3r(self.x_val*other, self.y_val*other, self.z_val*other)\n else:\n raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )\n\n def dot(self, other):\n if type(self) == type(other):\n return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val\n else:\n raise TypeError('unsupported operand type(s) for \\'dot\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def cross(self, other):\n if type(self) == type(other):\n 
cross_product = np.cross(self.to_numpy_array(), other.to_numpy_array())\n return Vector3r(cross_product[0], cross_product[1], cross_product[2])\n else:\n raise TypeError('unsupported operand type(s) for \\'cross\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def get_length(self):\n return ( self.x_val**2 + self.y_val**2 + self.z_val**2 )**0.5\n\n def distance_to(self, other):\n return ( (self.x_val-other.x_val)**2 + (self.y_val-other.y_val)**2 + (self.z_val-other.z_val)**2 )**0.5\n\n def to_Quaternionr(self):\n return Quaternionr(self.x_val, self.y_val, self.z_val, 0)\n\n def to_numpy_array(self):\n return np.array([self.x_val, self.y_val, self.z_val], dtype=np.float32)\n\n def __iter__(self):\n return iter((self.x_val, self.y_val, self.z_val))\n\nclass Quaternionr(MsgpackMixin):\n w_val = 0.0\n x_val = 0.0\n y_val = 0.0\n z_val = 0.0\n\n def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0, w_val = 1.0):\n self.x_val = x_val\n self.y_val = y_val\n self.z_val = z_val\n self.w_val = w_val\n\n @staticmethod\n def nanQuaternionr():\n return Quaternionr(np.nan, np.nan, np.nan, np.nan)\n\n def containsNan(self):\n return (math.isnan(self.w_val) or math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))\n\n def __add__(self, other):\n if type(self) == type(other):\n return Quaternionr( self.x_val+other.x_val, self.y_val+other.y_val, self.z_val+other.z_val, self.w_val+other.w_val )\n else:\n raise TypeError('unsupported operand type(s) for +: %s and %s' % ( str(type(self)), str(type(other))) )\n\n def __mul__(self, other):\n if type(self) == type(other):\n t, x, y, z = self.w_val, self.x_val, self.y_val, self.z_val\n a, b, c, d = other.w_val, other.x_val, other.y_val, other.z_val\n return Quaternionr( w_val = a*t - b*x - c*y - d*z,\n x_val = b*t + a*x + d*y - c*z,\n y_val = c*t + a*y + b*z - d*x,\n z_val = d*t + z*a + c*x - b*y)\n else:\n raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )\n\n def __truediv__(self, other):\n if type(other) == type(self):\n return self * other.inverse()\n elif type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:\n return Quaternionr( self.x_val / other, self.y_val / other, self.z_val / other, self.w_val / other)\n else:\n raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )\n\n def dot(self, other):\n if type(self) == type(other):\n return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val + self.w_val*other.w_val\n else:\n raise TypeError('unsupported operand type(s) for \\'dot\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def cross(self, other):\n if type(self) == type(other):\n return (self * other - other * self) / 2\n else:\n raise TypeError('unsupported operand type(s) for \\'cross\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def outer_product(self, other):\n if type(self) == type(other):\n return ( self.inverse()*other - other.inverse()*self ) / 2\n else:\n raise TypeError('unsupported operand type(s) for \\'outer_product\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def rotate(self, other):\n if type(self) == type(other):\n if other.get_length() == 1:\n return other * self * other.inverse()\n else:\n raise ValueError('length of the other Quaternionr must be 1')\n else:\n raise TypeError('unsupported operand type(s) for \\'rotate\\': %s and %s' % ( str(type(self)), str(type(other))) )\n\n def conjugate(self):\n return 
Quaternionr(-self.x_val, -self.y_val, -self.z_val, self.w_val)\n\n def star(self):\n return self.conjugate()\n\n def inverse(self):\n return self.star() / self.dot(self)\n\n def sgn(self):\n return self/self.get_length()\n\n def get_length(self):\n return ( self.x_val**2 + self.y_val**2 + self.z_val**2 + self.w_val**2 )**0.5\n\n def to_numpy_array(self):\n return np.array([self.x_val, self.y_val, self.z_val, self.w_val], dtype=np.float32)\n\n def __iter__(self):\n return iter((self.x_val, self.y_val, self.z_val, self.w_val))\n\nclass Pose(MsgpackMixin):\n position = Vector3r()\n orientation = Quaternionr()\n\n def __init__(self, position_val = None, orientation_val = None):\n position_val = position_val if position_val is not None else Vector3r()\n orientation_val = orientation_val if orientation_val is not None else Quaternionr()\n self.position = position_val\n self.orientation = orientation_val\n\n @staticmethod\n def nanPose():\n return Pose(Vector3r.nanVector3r(), Quaternionr.nanQuaternionr())\n\n def containsNan(self):\n return (self.position.containsNan() or self.orientation.containsNan())\n\n def __iter__(self):\n return iter((self.position, self.orientation))\n\nclass CollisionInfo(MsgpackMixin):\n has_collided = False\n normal = Vector3r()\n impact_point = Vector3r()\n position = Vector3r()\n penetration_depth = 0.0\n time_stamp = 0.0\n object_name = \"\"\n object_id = -1\n\nclass GeoPoint(MsgpackMixin):\n latitude = 0.0\n longitude = 0.0\n altitude = 0.0\n\nclass YawMode(MsgpackMixin):\n is_rate = True\n yaw_or_rate = 0.0\n def __init__(self, is_rate = True, yaw_or_rate = 0.0):\n self.is_rate = is_rate\n self.yaw_or_rate = yaw_or_rate\n\nclass RCData(MsgpackMixin):\n timestamp = 0\n pitch, roll, throttle, yaw = (0.0,)*4 #init 4 variable to 0.0\n switch1, switch2, switch3, switch4 = (0,)*4\n switch5, switch6, switch7, switch8 = (0,)*4\n is_initialized = False\n is_valid = False\n def __init__(self, timestamp = 0, pitch = 0.0, roll = 0.0, throttle = 0.0, yaw = 0.0, switch1 = 0,\n switch2 = 0, switch3 = 0, switch4 = 0, switch5 = 0, switch6 = 0, switch7 = 0, switch8 = 0, is_initialized = False, is_valid = False):\n self.timestamp = timestamp\n self.pitch = pitch\n self.roll = roll\n self.throttle = throttle\n self.yaw = yaw\n self.switch1 = switch1\n self.switch2 = switch2\n self.switch3 = switch3\n self.switch4 = switch4\n self.switch5 = switch5\n self.switch6 = switch6\n self.switch7 = switch7\n self.switch8 = switch8\n self.is_initialized = is_initialized\n self.is_valid = is_valid\n\nclass ImageRequest(MsgpackMixin):\n camera_name = '0'\n image_type = ImageType.Scene\n pixels_as_float = False\n compress = False\n\n def __init__(self, camera_name, image_type, pixels_as_float = False, compress = True):\n # todo: in future remove str(), it's only for compatibility to pre v1.2\n self.camera_name = str(camera_name)\n self.image_type = image_type\n self.pixels_as_float = pixels_as_float\n self.compress = compress\n\n\nclass ImageResponse(MsgpackMixin):\n image_data_uint8 = np.uint8(0)\n image_data_float = 0.0\n camera_position = Vector3r()\n camera_orientation = Quaternionr()\n time_stamp = np.uint64(0)\n message = ''\n pixels_as_float = 0.0\n compress = True\n width = 0\n height = 0\n image_type = ImageType.Scene\n\nclass CarControls(MsgpackMixin):\n throttle = 0.0\n steering = 0.0\n brake = 0.0\n handbrake = False\n is_manual_gear = False\n manual_gear = 0\n gear_immediate = True\n\n def __init__(self, throttle = 0, steering = 0, brake = 0,\n handbrake = False, is_manual_gear = 
False, manual_gear = 0, gear_immediate = True):\n self.throttle = throttle\n self.steering = steering\n self.brake = brake\n self.handbrake = handbrake\n self.is_manual_gear = is_manual_gear\n self.manual_gear = manual_gear\n self.gear_immediate = gear_immediate\n\n\n def set_throttle(self, throttle_val, forward):\n if (forward):\n self.is_manual_gear = False\n self.manual_gear = 0\n self.throttle = abs(throttle_val)\n else:\n self.is_manual_gear = False\n self.manual_gear = -1\n self.throttle = - abs(throttle_val)\n\nclass KinematicsState(MsgpackMixin):\n position = Vector3r()\n orientation = Quaternionr()\n linear_velocity = Vector3r()\n angular_velocity = Vector3r()\n linear_acceleration = Vector3r()\n angular_acceleration = Vector3r()\n\nclass EnvironmentState(MsgpackMixin):\n position = Vector3r()\n geo_point = GeoPoint()\n gravity = Vector3r()\n air_pressure = 0.0\n temperature = 0.0\n air_density = 0.0\n\nclass CarState(MsgpackMixin):\n speed = 0.0\n gear = 0\n rpm = 0.0\n maxrpm = 0.0\n handbrake = False\n collision = CollisionInfo()\n kinematics_estimated = KinematicsState()\n timestamp = np.uint64(0)\n\nclass MultirotorState(MsgpackMixin):\n collision = CollisionInfo()\n kinematics_estimated = KinematicsState()\n gps_location = GeoPoint()\n timestamp = np.uint64(0)\n landed_state = LandedState.Landed\n rc_data = RCData()\n ready = False\n ready_message = \"\"\n can_arm = False\n\nclass RotorStates(MsgpackMixin):\n timestamp = np.uint64(0)\n rotors = []\n\nclass ProjectionMatrix(MsgpackMixin):\n matrix = []\n\nclass CameraInfo(MsgpackMixin):\n pose = Pose()\n fov = -1\n proj_mat = ProjectionMatrix()\n\nclass LidarData(MsgpackMixin):\n point_cloud = 0.0\n time_stamp = np.uint64(0)\n pose = Pose()\n segmentation = 0\n\nclass ImuData(MsgpackMixin):\n time_stamp = np.uint64(0)\n orientation = Quaternionr()\n angular_velocity = Vector3r()\n linear_acceleration = Vector3r()\n\nclass BarometerData(MsgpackMixin):\n time_stamp = np.uint64(0)\n altitude = Quaternionr()\n pressure = Vector3r()\n qnh = Vector3r()\n\nclass MagnetometerData(MsgpackMixin):\n time_stamp = np.uint64(0)\n magnetic_field_body = Vector3r()\n magnetic_field_covariance = 0.0\n\nclass GnssFixType(MsgpackMixin):\n GNSS_FIX_NO_FIX = 0\n GNSS_FIX_TIME_ONLY = 1\n GNSS_FIX_2D_FIX = 2\n GNSS_FIX_3D_FIX = 3\n\nclass GnssReport(MsgpackMixin):\n geo_point = GeoPoint()\n eph = 0.0\n epv = 0.0\n velocity = Vector3r()\n fix_type = GnssFixType()\n time_utc = np.uint64(0)\n\nclass GpsData(MsgpackMixin):\n time_stamp = np.uint64(0)\n gnss = GnssReport()\n is_valid = False\n\nclass DistanceSensorData(MsgpackMixin):\n time_stamp = np.uint64(0)\n distance = 0.0\n min_distance = 0.0\n max_distance = 0.0\n relative_pose = Pose()\n\nclass Box2D(MsgpackMixin):\n min = Vector2r()\n max = Vector2r()\n\nclass Box3D(MsgpackMixin):\n min = Vector3r()\n max = Vector3r()\n\nclass DetectionInfo(MsgpackMixin):\n name = ''\n geo_point = GeoPoint()\n box2D = Box2D()\n box3D = Box3D()\n relative_pose = Pose()\n \nclass PIDGains():\n \"\"\"\n Struct to store values of PID gains. 
Used to transmit controller gain values while instantiating\n AngleLevel/AngleRate/Velocity/PositionControllerGains objects.\n\n Attributes:\n kP (float): Proportional gain\n kI (float): Integrator gain\n kD (float): Derivative gain\n \"\"\"\n def __init__(self, kp, ki, kd):\n self.kp = kp\n self.ki = ki\n self.kd = kd\n\n def to_list(self):\n return [self.kp, self.ki, self.kd]\n\nclass AngleRateControllerGains():\n \"\"\"\n Struct to contain controller gains used by angle level PID controller\n\n Attributes:\n roll_gains (PIDGains): kP, kI, kD for roll axis\n pitch_gains (PIDGains): kP, kI, kD for pitch axis\n yaw_gains (PIDGains): kP, kI, kD for yaw axis\n \"\"\"\n def __init__(self, roll_gains = PIDGains(0.25, 0, 0),\n pitch_gains = PIDGains(0.25, 0, 0),\n yaw_gains = PIDGains(0.25, 0, 0)):\n self.roll_gains = roll_gains\n self.pitch_gains = pitch_gains\n self.yaw_gains = yaw_gains\n\n def to_lists(self):\n return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]\n\nclass AngleLevelControllerGains():\n \"\"\"\n Struct to contain controller gains used by angle rate PID controller\n\n Attributes:\n roll_gains (PIDGains): kP, kI, kD for roll axis\n pitch_gains (PIDGains): kP, kI, kD for pitch axis\n yaw_gains (PIDGains): kP, kI, kD for yaw axis\n \"\"\"\n def __init__(self, roll_gains = PIDGains(2.5, 0, 0),\n pitch_gains = PIDGains(2.5, 0, 0),\n yaw_gains = PIDGains(2.5, 0, 0)):\n self.roll_gains = roll_gains\n self.pitch_gains = pitch_gains\n self.yaw_gains = yaw_gains\n\n def to_lists(self):\n return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]\n\nclass VelocityControllerGains():\n \"\"\"\n Struct to contain controller gains used by velocity PID controller\n\n Attributes:\n x_gains (PIDGains): kP, kI, kD for X axis\n y_gains (PIDGains): kP, kI, kD for Y axis\n z_gains (PIDGains): kP, kI, kD for Z axis\n \"\"\"\n def __init__(self, x_gains = PIDGains(0.2, 0, 0),\n y_gains = PIDGains(0.2, 0, 0),\n z_gains = PIDGains(2.0, 2.0, 0)):\n self.x_gains = x_gains\n self.y_gains = y_gains\n self.z_gains = z_gains\n\n def to_lists(self):\n return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]\n\nclass PositionControllerGains():\n \"\"\"\n Struct to contain controller gains used by position PID controller\n\n Attributes:\n x_gains (PIDGains): kP, kI, kD for X axis\n y_gains (PIDGains): kP, kI, kD for Y axis\n z_gains (PIDGains): kP, kI, kD for Z axis\n \"\"\"\n def __init__(self, x_gains = PIDGains(0.25, 0, 0),\n y_gains = PIDGains(0.25, 0, 0),\n z_gains = PIDGains(0.25, 0, 0)):\n self.x_gains = x_gains\n self.y_gains = y_gains\n self.z_gains = z_gains\n\n def to_lists(self):\n return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]\n\nclass MeshPositionVertexBuffersResponse(MsgpackMixin):\n position = Vector3r()\n orientation = Quaternionr()\n vertices = 0.0\n indices = 0.0\n name = ''\n" ]
[ [ "numpy.uint8", "numpy.array", "numpy.uint64" ] ]
megvii-research/DCLS-SR
[ "969fd3e840e14e65891b4694057b93d9b4aebb57" ]
[ "codes/utils/file_utils.py" ]
[ "import logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom shutil import get_terminal_size\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport yaml\n\ntry:\n from yaml import CDumper as Dumper\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Dumper, Loader\n\n\ndef OrderedYaml():\n _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n def dict_representer(dumper, data):\n return dumper.represent_dict(data.items())\n\n def dict_constructor(loader, node):\n return OrderedDict(loader.construct_pairs(node))\n\n Dumper.add_representer(OrderedDict, dict_representer)\n Loader.add_constructor(_mapping_tag, dict_constructor)\n return Loader, Dumper\n\n\ndef get_timestamp():\n return datetime.now().strftime(\"%y%m%d-%H%M%S\")\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef mkdirs(paths):\n if isinstance(paths, str):\n mkdir(paths)\n else:\n for path in paths:\n mkdir(path)\n\n\ndef mkdir_and_rename(path):\n if os.path.exists(path):\n new_name = path + \"_archived_\" + get_timestamp()\n print(\"Path already exists. Rename it to [{:s}]\".format(new_name))\n logger = logging.getLogger(\"base\")\n logger.info(\"Path already exists. Rename it to [{:s}]\".format(new_name))\n os.rename(path, new_name)\n os.makedirs(path)\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\ndef setup_logger(\n logger_name, root, phase, level=logging.INFO, screen=False, tofile=False\n):\n \"\"\"set up logger\"\"\"\n lg = logging.getLogger(logger_name)\n formatter = logging.Formatter(\n \"%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s\",\n datefmt=\"%y-%m-%d %H:%M:%S\",\n )\n lg.setLevel(level)\n if tofile:\n log_file = os.path.join(root, phase + \"_{}.log\".format(get_timestamp()))\n fh = logging.FileHandler(log_file, mode=\"w\")\n fh.setFormatter(formatter)\n lg.addHandler(fh)\n if screen:\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n lg.addHandler(sh)\n\n\nclass ProgressBar(object):\n \"\"\"A progress bar which can print the progress\n modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py\n \"\"\"\n\n def __init__(self, task_num=0, bar_width=50, start=True):\n self.task_num = task_num\n max_bar_width = self._get_max_bar_width()\n self.bar_width = bar_width if bar_width <= max_bar_width else max_bar_width\n self.completed = 0\n if start:\n self.start()\n\n def _get_max_bar_width(self):\n terminal_width, _ = get_terminal_size()\n max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)\n if max_bar_width < 10:\n print(\n \"terminal width is too small ({}), please consider widen the terminal for better \"\n \"progressbar visualization\".format(terminal_width)\n )\n max_bar_width = 10\n return max_bar_width\n\n def start(self):\n if self.task_num > 0:\n sys.stdout.write(\n \"[{}] 0/{}, elapsed: 0s, ETA:\\n{}\\n\".format(\n \" \" * self.bar_width, self.task_num, \"Start...\"\n )\n )\n else:\n sys.stdout.write(\"completed: 0, elapsed: 0s\")\n sys.stdout.flush()\n self.start_time = time.time()\n\n def update(self, msg=\"In progress...\"):\n self.completed += 1\n elapsed = time.time() - self.start_time\n fps = self.completed / elapsed\n if self.task_num > 0:\n percentage = self.completed / float(self.task_num)\n eta = int(elapsed * (1 - percentage) / percentage 
+ 0.5)\n mark_width = int(self.bar_width * percentage)\n bar_chars = \">\" * mark_width + \"-\" * (self.bar_width - mark_width)\n sys.stdout.write(\"\\033[2F\") # cursor up 2 lines\n sys.stdout.write(\n \"\\033[J\"\n ) # clean the output (remove extra chars since last display)\n sys.stdout.write(\n \"[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\\n{}\\n\".format(\n bar_chars,\n self.completed,\n self.task_num,\n fps,\n int(elapsed + 0.5),\n eta,\n msg,\n )\n )\n else:\n sys.stdout.write(\n \"completed: {}, elapsed: {}s, {:.1f} tasks/s\".format(\n self.completed, int(elapsed + 0.5), fps\n )\n )\n sys.stdout.flush()\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.cuda.manual_seed_all" ] ]
lixiangyin666/Models
[ "924d3b8b14fadb9aa3279e176a81cd18f88659cc" ]
[ "official/vision/segmentation/tools/inference.py" ]
[ "# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport argparse\n\nimport cv2\nimport numpy as np\n\nimport megengine as mge\n\nfrom official.vision.segmentation.tools.utils import class_colors, import_from_file\n\nlogger = mge.get_logger(__name__)\nlogger.setLevel(\"INFO\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-f\", \"--file\", default=\"net.py\", type=str, help=\"net description file\"\n )\n parser.add_argument(\n \"-w\", \"--weight_file\", default=None, type=str, help=\"weights file\",\n )\n parser.add_argument(\"-i\", \"--image\", type=str)\n args = parser.parse_args()\n\n current_network = import_from_file(args.file)\n cfg = current_network.Cfg()\n cfg.backbone_pretrained = False\n model = current_network.Net(cfg)\n model.eval()\n\n state_dict = mge.load(args.weight_file)\n if \"state_dict\" in state_dict:\n state_dict = state_dict[\"state_dict\"]\n model.load_state_dict(state_dict)\n\n img = cv2.imread(args.image)\n pred = inference(img, model)\n cv2.imwrite(\"results.jpg\", pred)\n\n\ndef inference(img, model):\n def pred_func(data):\n pred = model(data)\n return pred\n\n img = (\n img.astype(\"float32\") - np.array(model.cfg.img_mean)\n ) / np.array(model.cfg.img_std)\n ori_h, ori_w = img.shape[:2]\n img = cv2.resize(img, (model.cfg.val_height, model.cfg.val_width))\n img = img.transpose(2, 0, 1)[np.newaxis]\n\n pred = pred_func(mge.tensor(img))\n pred = pred.numpy().squeeze().argmax(0)\n pred = cv2.resize(\n pred.astype(\"uint8\"), (ori_w, ori_h), interpolation=cv2.INTER_NEAREST\n )\n\n out = np.zeros((ori_h, ori_w, 3))\n nids = np.unique(pred)\n for t in nids:\n out[pred == t] = class_colors[t]\n return out\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.unique" ] ]
Bekyilma/nebullvm
[ "0d2b2151229a6164da2c8b4a2c4dbfacdf21dede" ]
[ "nebullvm/converters/tensorflow_converters.py" ]
[ "from pathlib import Path\nimport subprocess\nfrom tempfile import TemporaryDirectory\nfrom typing import Union, Tuple, List\n\nimport tensorflow as tf\nimport tf2onnx\n\n\ndef get_outputs_sizes_tf(\n tf_model: Union[tf.Module, tf.keras.Model], input_tensors: List[tf.Tensor]\n) -> List[Tuple[int, ...]]:\n outputs = tf_model(*input_tensors)\n if isinstance(outputs, tf.Tensor):\n return [tuple(tf.shape(outputs))]\n return [tuple(x.size()) for x in outputs]\n\n\ndef convert_tf_to_onnx(\n model: tf.Module,\n output_file_path: Union[str, Path],\n):\n \"\"\"Convert TF models into ONNX.\n\n Args:\n model (tf.Module): TF model.\n input_sizes (List[tuple]): Sizes of the model's input tensors.\n output_file_path (Path): Path where storing the output file.\n \"\"\"\n with TemporaryDirectory() as temp_dir:\n tf.saved_model.save(model, export_dir=temp_dir)\n onnx_cmd = [\n \"python3\",\n \"-m\",\n \"tf2onnx.convert\",\n \"--saved-model\",\n f\"{temp_dir}\",\n \"--output\",\n f\"{output_file_path}\",\n \"--opset\",\n \"11\",\n ]\n subprocess.run(onnx_cmd)\n\n\ndef convert_keras_to_onnx(\n model: tf.keras.Model,\n input_sizes: List[Tuple[int, ...]],\n output_file_path: Union[str, Path],\n):\n \"\"\"Convert keras models into ONNX.\n\n Args:\n model (tf.Module): keras model.\n input_sizes (List[tuple]): Sizes of the model's input tensors.\n output_file_path (Path): Path where storing the output file.\n \"\"\"\n spec = (\n tf.TensorSpec(input_size, tf.float32, name=f\"input_{i}\")\n for i, input_size in enumerate(input_sizes)\n )\n tf2onnx.convert.from_keras(\n model, input_signature=spec, opset=11, output_path=output_file_path\n )\n" ]
[ [ "tensorflow.saved_model.save", "tensorflow.TensorSpec", "tensorflow.shape" ] ]
JEJodesty/cadCAD
[ "70ce381ef4b9325b23dc77785d950203424ebcdd" ]
[ "cadCAD/configuration/__init__.py" ]
[ "from typing import Dict, Callable, List, Tuple\nfrom pandas.core.frame import DataFrame\nfrom datetime import datetime\nfrom collections import deque\nfrom copy import deepcopy\nimport pandas as pd\n\nfrom cadCAD.utils import key_filter\nfrom cadCAD.configuration.utils import exo_update_per_ts, configs_as_objs\nfrom cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config\n\n\nclass Configuration():\n def __init__(self, user_id, model_id, subset_id, subset_window, sim_config={}, initial_state={}, seeds={}, env_processes={},\n exogenous_states={}, partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b],\n session_id=0, simulation_id=0, run_id=1, experiment_id=0, exp_window=deque([0, None], 2),\n exp_creation_ts=None, **kwargs\n ) -> None:\n self.sim_config = sim_config\n self.initial_state = initial_state\n self.seeds = seeds\n self.env_processes = env_processes\n self.exogenous_states = exogenous_states\n self.partial_state_update_blocks = partial_state_update_blocks\n self.policy_ops = policy_ops\n self.kwargs = kwargs\n\n self.session_id = session_id # essentially config id\n\n self.experiment_id = experiment_id\n self.user_id = user_id\n self.model_id = model_id\n self.exp_creation_ts = exp_creation_ts\n\n self.labeled_jobs = {}\n self.simulation_id = simulation_id\n self.subset_id = subset_id\n self.run_id = run_id\n\n self.exp_window = exp_window\n self.subset_window = subset_window\n\n sanitize_config(self)\n\n\nclass Experiment(object):\n def __init__(self):\n self.exp_creation_ts = str(datetime.utcnow())\n self.configs = []\n self.sys_configs = []\n\n self.model_job_map, self.model_job_counts = {}, {}\n self.model_ids = list(self.model_job_map.keys())\n self.model_id_queue = []\n\n self.exp_id = 0\n self.simulation_id = -1\n self.subset_id = 0\n self.exp_window = deque([self.exp_id, None], 2)\n self.subset_window = deque([self.subset_id, None], 2)\n\n\n def append_model(\n self,\n user_id='cadCAD_user',\n model_id='sys_model_#',\n sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={},\n partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b], _exo_update_per_ts: bool = True, **kwargs\n # config_list=deepcopy(global_configs)\n ) -> None:\n _sim_configs = deepcopy(sim_configs)\n # self.configs = config_list\n self.simulation_id += 1\n\n try:\n max_runs = _sim_configs[0]['N']\n except KeyError:\n max_runs = _sim_configs['N']\n\n if _exo_update_per_ts is True:\n exogenous_states = exo_update_per_ts(raw_exogenous_states)\n else:\n exogenous_states = raw_exogenous_states\n\n if isinstance(_sim_configs, dict):\n _sim_configs = [_sim_configs]\n\n sim_cnt_local = 0\n new_sim_configs = []\n for subset_id, t in enumerate(list(zip(_sim_configs, list(range(len(_sim_configs)))))):\n sim_config = t[0]\n sim_config['subset_id'] = subset_id\n sim_config['subset_window'] = self.subset_window\n N = sim_config['N']\n if N > 1:\n for n in range(N):\n sim_config['simulation_id'] = self.simulation_id\n sim_config['run_id'] = n\n sim_config['N'] = 1\n new_sim_configs.append(deepcopy(sim_config))\n del sim_config\n else:\n sim_config['simulation_id'] = self.simulation_id\n sim_config['run_id'] = 0\n new_sim_configs.append(deepcopy(sim_config))\n\n sim_cnt_local += 1\n\n run_id = 0\n new_model_ids, new_configs = [], []\n for sim_config in new_sim_configs:\n subset_id = sim_config['subset_id']\n sim_config['N'] = run_id + 1\n if max_runs == 1:\n sim_config['run_id'] = run_id\n elif max_runs >= 1:\n if run_id >= 
max_runs:\n sim_config['N'] = run_id - (max_runs - 1)\n\n self.exp_window = deepcopy(self.exp_window)\n config = Configuration(\n exp_creation_ts=self.exp_creation_ts,\n\n sim_config=sim_config,\n initial_state=initial_state,\n seeds=seeds,\n exogenous_states=exogenous_states,\n env_processes=env_processes,\n partial_state_update_blocks=partial_state_update_blocks,\n policy_ops=policy_ops,\n\n # session_id=session_id,\n user_id=user_id,\n model_id=model_id,\n session_id=f\"{user_id}={sim_config['simulation_id']}_{sim_config['run_id']}\",\n\n experiment_id=self.exp_id,\n simulation_id=self.simulation_id,\n subset_id=subset_id,\n run_id=sim_config['run_id'],\n\n exp_window=self.exp_window,\n subset_window=self.subset_window\n )\n\n # self.configs.append(config)\n new_configs.append(config)\n new_model_ids.append(model_id)\n run_id += 1\n self.configs += new_configs\n self.model_id_queue += new_model_ids\n self.exp_id += 1\n self.exp_window.appendleft(self.exp_id)\n self.sys_configs += configs_as_objs(new_configs)\n\n unique_new_model_ids = list(set(new_model_ids))\n new_model_job_list = [(model_id, []) for model_id in unique_new_model_ids]\n for model_id, v in new_model_job_list:\n if model_id not in self.model_ids:\n self.model_job_map[model_id] = v\n self.model_ids.append(model_id)\n else:\n except_str = f\"\"\"\n Error: Duplicate model_id in Experiment - \\'{model_id}\\' in {self.model_ids} \n -- Specify unique model_id for each use of `.append_config` per `Experiment()`\n \"\"\"\n raise Exception(except_str)\n\n for model_id, job in list(zip(new_model_ids, new_configs)):\n self.model_job_map[model_id].append(job)\n\n self.model_job_counts = dict([(k, len(v)) for k, v in self.model_job_map.items()])\n\n append_configs = append_model\n\n\nclass Identity:\n def __init__(self, policy_id: Dict[str, int] = {'identity': 0}) -> None:\n self.beh_id_return_val = policy_id\n\n def p_identity(self, var_dict, sub_step, sL, s, **kwargs):\n return self.beh_id_return_val\n\n def policy_identity(self, k: str) -> Callable:\n return self.p_identity\n\n def no_state_identity(self, var_dict, sub_step, sL, s, _input, **kwargs):\n return None\n\n def state_identity(self, k: str) -> Callable:\n return lambda var_dict, sub_step, sL, s, _input, **kwargs: (k, s[k])\n\n # state_identity = cloudpickle.dumps(state_identity)\n\n def apply_identity_funcs(self,\n identity: Callable,\n df: DataFrame,\n cols: List[str]) -> DataFrame:\n \"\"\"\n Apply the identity on each df column, using its self value as the\n argument.\n \"\"\"\n fill_values = {col: identity(col) for col in cols}\n filled_df = df.fillna(fill_values)\n return filled_df\n\n\nclass Processor:\n def __init__(self, id: Identity = Identity()) -> None:\n self.id = id\n self.p_identity = id.p_identity\n self.policy_identity = id.policy_identity\n self.no_state_identity = id.no_state_identity\n self.state_identity = id.state_identity\n self.apply_identity_funcs = id.apply_identity_funcs\n\n def create_matrix_field(self, partial_state_updates, key: str) -> DataFrame:\n if key == 'variables':\n identity = self.state_identity\n elif key == 'policies':\n identity = self.policy_identity\n\n df = pd.DataFrame(key_filter(partial_state_updates, key))\n filled_df = self.apply_identity_funcs(identity, df, list(df.columns))\n if len(filled_df) > 0:\n return filled_df\n else:\n return pd.DataFrame({'empty': []})\n\n def generate_config(self, initial_state, partial_state_updates, exo_proc\n ) -> List[Tuple[List[Callable], List[Callable]]]:\n\n def no_update_handler(bdf, 
sdf):\n if (bdf.empty == False) and (sdf.empty == True):\n bdf_values = bdf.values.tolist()\n sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(partial_state_updates))]\n return sdf_values, bdf_values\n elif (bdf.empty == True) and (sdf.empty == False):\n sdf_values = sdf.values.tolist()\n bdf_values = [[self.p_identity] * len(sdf_values) for m in range(len(partial_state_updates))]\n return sdf_values, bdf_values\n else:\n sdf_values = sdf.values.tolist()\n bdf_values = bdf.values.tolist()\n return sdf_values, bdf_values\n\n def only_ep_handler(state_dict):\n sdf_functions = [\n lambda var_dict, sub_step, sL, s, _input, **kwargs: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())\n ]\n sdf_values = [sdf_functions]\n bdf_values = [[self.p_identity] * len(sdf_values)]\n return sdf_values, bdf_values\n\n if len(partial_state_updates) != 0:\n # backwards compatibility\n partial_state_updates = sanitize_partial_state_updates(partial_state_updates)\n\n bdf = self.create_matrix_field(partial_state_updates, 'policies')\n sdf = self.create_matrix_field(partial_state_updates, 'variables')\n sdf_values, bdf_values = no_update_handler(bdf, sdf)\n zipped_list = list(zip(sdf_values, bdf_values))\n else:\n sdf_values, bdf_values = only_ep_handler(initial_state)\n zipped_list = list(zip(sdf_values, bdf_values))\n\n return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))\n" ]
[ [ "pandas.DataFrame" ] ]
daanvanes/pRF_attention_analysis
[ "a1825eaa5290d08c8cafd0487427d62b1512d05c" ]
[ "FitPRFModel.py" ]
[ "# !/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nSession.py\n\nCreated by Tomas HJ Knapen on 2009-11-26.\nCopyright (c) 2009 TK. All rights reserved.\n\"\"\"\n\n# import python packages:\nfrom __future__ import division\nfrom sklearn.linear_model import Ridge\nfrom IPython import embed as shell\nimport numpy as np\nimport pylab as pl\nfrom scipy.stats import pearsonr,spearmanr,kendalltau\nfrom scipy.signal import fftconvolve\nfrom scipy import ndimage\nfrom scipy import interpolate\nfrom lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, report_errors\nfrom skimage.morphology import disk\nfrom matplotlib import cm \nimport seaborn as sn\nfrom sympy.solvers import solve\nfrom sympy import Symbol, exp\nimport hrf_estimation as he\nimport time as t\nimport os\nsn.set(style=\"ticks\")\n\nclass gpf(object):\n\tdef __init__(self, design_matrix, max_eccentricity, n_pixel_elements, ssr, rtime,slice_no):\n\t\tself.design_matrix = design_matrix\n\t\tself.max_eccentricity = max_eccentricity\n\t\tself.n_pixel_elements = n_pixel_elements\n\t\tself.ssr = ssr\n\t\tself.rtime = rtime\t\n\t\tself.slice = slice_no\n\n\t\tX = np.linspace(-max_eccentricity, max_eccentricity, n_pixel_elements)\n\t\tY = np.linspace(-max_eccentricity, max_eccentricity, n_pixel_elements)\n\t\tself.MG = np.meshgrid(X, Y)\n\n\t#define model function and pass independent variables x and y as a list\n\tdef twoD_Gaussian(self, xo, yo, sigma):\n\t\t(x,y) = self.MG\n\t\ttheta=0\n\t\ta = (np.cos(theta)**2)/(2*sigma**2) + (np.sin(theta)**2)/(2*sigma**2)\n\t\tb = -(np.sin(2*theta))/(4*sigma**2) + (np.sin(2*theta))/(4*sigma**2)\n\t\tc = (np.sin(theta)**2)/(2*sigma**2) + (np.cos(theta)**2)/(2*sigma**2)\n\t\tgauss = np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))\n\t\tgauss[disk((self.n_pixel_elements-1)/2)==0] = 0 \n\t\treturn gauss\n\n\tdef raw_model_prediction(self, xo, yo, sigma):\n\t\tg = self.twoD_Gaussian(xo, yo, sigma).reshape(self.n_pixel_elements**2)\n\t\treturn np.dot(self.design_matrix.reshape(-1,self.n_pixel_elements**2), g)\n\n\tdef hrf_model_prediction(self, xo, yo, sigma,hrf_params):\n\n\t\trmp = self.raw_model_prediction( xo, yo, sigma)\n\t\toriginal_size = len(rmp)\n\t\trmp = np.repeat(rmp, self.ssr, axis=0)\n\t\txx = np.arange(0,32,self.rtime/float(self.ssr))\n\t\tself.hrf_kernel = hrf_params[0] * he.hrf.spmt(xx) +hrf_params[1]* he.hrf.dspmt(xx) +hrf_params[2] * he.hrf.ddspmt(xx)\n\t\tif self.hrf_kernel.shape[0] % 2 == 1:\n\t\t\tself.hrf_kernel = np.r_[self.hrf_kernel, 0]\n\t\tself.hrf_kernel /= np.abs(self.hrf_kernel).sum()\n\n\t\t# add slice timing correction to fit\n\t\tconvolved_mp = fftconvolve( rmp, self.hrf_kernel, 'full' )[int(self.slice)::int(self.ssr)][:int(original_size)]\n\n\t\treturn convolved_mp, self.hrf_kernel\n\ndef fitRidge_for_Dumoulin(design_matrix, timeseries, n_iter = 50, compute_score = False, verbose = True,valid_regressors=[],n_pixel_elements=[], alpha = 1.0):\n\t\"\"\"fitRidge fits a design matrix to a given timeseries.\n\tIt computes the coefficients and returns these coefficients\n\tplus the correlation between the model fit and timeseries.\n\t\"\"\"\n\tbr = Ridge(alpha = alpha)\n\tbr.fit(design_matrix, timeseries)\n\tpredicted_signal = br.coef_ * design_matrix\n\tsrp = list(spearmanr(timeseries, predicted_signal.sum(axis = 1)))\n\tsrp = [srp[0], -np.log10(srp[1])]\n\n\tPRF = np.zeros(n_pixel_elements**2)\n\tPRF[valid_regressors] = br.coef_\n\tPRF = np.reshape(PRF,(n_pixel_elements,n_pixel_elements))\n\tmaximum = 
ndimage.measurements.maximum_position(PRF)\n\n\tstart_params = {}\n\tstart_params['xo'], start_params['yo'] = maximum[1]/float(n_pixel_elements)*2-1, maximum[0]/float(n_pixel_elements)*2-1\n\n\treturn start_params, PRF, predicted_signal.sum(axis = 1)\n\ndef fit_PRF_on_concatenated_data(data_shared,voxels_in_this_slice,n_TRs,n_slices,fit_on_all_data,plotbool,raw_design_matrices, dm_for_BR,\n\tvalid_regressors, n_pixel_elements_convolved, n_pixel_elements_raw,plotdir,voxno,slice_no,randint,roi,TR,model,hrf_params_shared,all_results_shared,conditions,\n\tresults_frames,\tpostFix=[],max_eccentricity=1,max_xy = 5,orientations=['0','45','90','135','180','225','270','315','X'],stim_radius = 7.5,\n\tnuisance_regressors = []):\n\t\"\"\"\n\t\"\"\"\n\n\t# grab data for this fit procedure from shared memory\n\ttime_course = np.array(data_shared[:,voxels_in_this_slice][:,voxno])\n\thrf_params = np.array(hrf_params_shared[:,voxels_in_this_slice][:,voxno])\n\n\tn_orientations = len(orientations)\n\n\t# already initialize the final PRF dict\n\tPRFs = {}\n\n\tif fit_on_all_data:\n\n\t\t#########################################################################################################################################################################################################################\n\t\t#### Instantiate parameters \n\t\t#########################################################################################################################################################################################################################\n\n\t\t## initiate search space with Ridge prefit\n\t\tRidge_start_params, PRFs['Ridge'], BR_predicted = fitRidge_for_Dumoulin(dm_for_BR, time_course, valid_regressors=valid_regressors, n_pixel_elements=n_pixel_elements_convolved, alpha=1e14)\n\n\t\t## initiate parameters:\n\t\tparams = Parameters()\n\t\t\n\t\t# one baseline parameter\n\t\tparams.add('baseline',value=0.0)\n\n\t\t# two location parameters\n\t\tparams.add('xo_%s'%conditions[0], value = Ridge_start_params['xo'])\n\t\tparams.add('yo_%s'%conditions[0], value = Ridge_start_params['yo'])\n\n\t\tparams.add('sigma_center_%s'%conditions[0],value=0.1,min=1e-20)#min=1e-201 # this means initialization at 0.1 * 7.5 = 0.75 degrees, with minimum of 0.075 degrees\n\t\tparams.add('amp_center_%s'%conditions[0],value=0.05,min=1e-20)#min=1e-201 # this is initialized at 0.001\n\n\t\t# surround parameters\n\t\tparams.add('sigma_surround_%s'%conditions[0],value=0.3,expr='sigma_center_%s+delta_sigma_%s'%(conditions[0],conditions[0])) # surround size should roughly be 5 times that of the center\n\t\tparams.add('delta_sigma_%s'%conditions[0],value=0.4,min=1e-20) # this difference parameter ensures that the surround is always larger than the center#,min=1e-20000000001\n\t\tparams.add('amp_surround_%s'%conditions[0],value=-0.005,max=1e-20,expr='-amp_center_%s+delta_amplitude_%s'%(conditions[0],conditions[0])) # initialized at 10% of center amplitude #max=-0.0000000001,\n\t\tparams.add('delta_amplitude_%s'%conditions[0],value=0.045,min=1e-20) # this difference parameter ensures that the surround is never deeper than the center is high,min=0.0000000001\n\n\t\t# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway\n\t\tif model == 'OG':\t\n\t\t\tparams['amp_surround_%s'%conditions[0]].value,params['amp_surround_%s'%conditions[0]].vary,params['amp_surround_%s'%conditions[0]].expr = 0, False, 
None\n\t\t\tparams['delta_sigma_%s'%conditions[0]].vary,params['sigma_surround_%s'%conditions[0]].vary = False, False\n\t\t\tparams['delta_amplitude_%s'%conditions[0]].vary = False\n\telse:\n\n\t\t#########################################################################################################################################################################################################################\n\t\t#### INITIATING PARAMETERS with all results\n\t\t#########################################################################################################################################################################################################################\n\n\t\t# grab data for this fit procedure from shared memory\n\t\tall_results = np.array(all_results_shared[:,voxels_in_this_slice][:,voxno])\n\n\t\t## initiate parameters:\n\t\tparams = Parameters()\n\n\t\t# shared baseline param:\n\t\tparams.add('baseline', value = all_results[results_frames['baseline']])\n\n\t\t# location parameters\n\t\tfor condition in conditions:\n\t\t\tparams.add('xo_%s'%condition, value = all_results[results_frames['xo']])\n\t\t\tparams.add('yo_%s'%condition, value = all_results[results_frames['yo']])\n\n\t\t\t# center parameters:\n\t\t\tparams.add('sigma_center_%s'%condition,value=all_results[results_frames['sigma_center']]/stim_radius,min=1e-20) # this means initialization at 0.05/2 * 15 = 1.5 degrees, ,min=0.0084\n\t\t\tparams.add('amp_center_%s'%condition,value=all_results[results_frames['amp_center']],min=1e-20) # this is initialized at 0.001 ,min=0.0000000001\n\n\t\t\t# surround parameters\n\t\t\tparams.add('sigma_surround_%s'%condition,value=all_results[results_frames['sigma_surround']]/stim_radius,expr='sigma_center_%s+delta_sigma_%s'%(condition,condition)) # surround size should roughly be 5 times that of the center\n\t\t\tparams.add('amp_surround_%s'%condition,value=all_results[results_frames['amp_surround']],max=-1e-20,expr='-amp_center_%s+delta_amplitude_%s'%(condition,condition)) # initialized at 10% of center amplitudemax=-0.0000000001\n\t\t\tparams.add('delta_sigma_%s'%condition,value=all_results[results_frames['delta_sigma']],min=1e-20) # this difference parameter ensures that the surround is always larger than the centermin=0.0000000001\n\t\t\tparams.add('delta_amplitude_%s'%condition,value=all_results[results_frames['delta_amplitude']],min=1e-20) # this difference parameter ensures that the surround is never deeper than the center is highmin=0.0000000001\n\n\t\t\t# when fitting an OG model, set all surround and delta parameters to 0 and to not vary and set the expression to None, otherwise it will start to vary anyway\n\t\t\tif model == 'OG':\t\n\t\t\t\tparams['amp_surround_%s'%condition].value,params['amp_surround_%s'%condition].vary,params['amp_surround_%s'%condition].expr = 0, False, None\n\t\t\t\tparams['delta_sigma_%s'%condition].vary,params['sigma_surround_%s'%condition].vary = False, False\n\t\t\t\tparams['delta_amplitude_%s'%condition].vary=False\n\n\t\tg = gpf(design_matrix = raw_design_matrices[conditions[0]], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = 1,slice_no=slice_no)\n\t\t\n\t\t# recreate PRFs\n\t\tthis_surround_PRF = g.twoD_Gaussian(all_results[results_frames['xo']],all_results[results_frames['yo']],\n\t\t\tall_results[results_frames['sigma_surround']]/stim_radius) * all_results[results_frames['amp_surround']]\n\t\tthis_center_PRF = g.twoD_Gaussian(all_results[results_frames['xo']], 
all_results[results_frames['yo']],\n\t\t\tall_results[results_frames['sigma_center']]/stim_radius) * all_results[results_frames['amp_center']]\n\t\tPRFs['All_fit'] = this_center_PRF + this_surround_PRF\n\n\t#########################################################################################################################################################################################################################\n\t#### Prepare fit object and function\n\t#########################################################################################################################################################################################################################\n\n\t# initiate model prediction object\n\tssr = np.round(1/(TR/float(n_slices)))\n\t\n\tgpfs = {}\n\tfor condition in conditions:\n\t\tgpfs[condition] = gpf(design_matrix = raw_design_matrices[condition], max_eccentricity = max_eccentricity, n_pixel_elements = n_pixel_elements_raw, rtime = TR, ssr = ssr,slice_no=slice_no)\n\n\tdef residual(params,recreate=False):\n\n\t\t# combine all stimulus regressors\n\t\tcombined_model_prediction = np.ones_like(time_course)*params['baseline'].value\n\t\tfor ci,condition in enumerate(conditions):\n\t\t\tcombined_model_prediction += gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, \n\t\t\t\tparams['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value\n\t\t\tcombined_model_prediction += gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, \n\t\t\t\tparams['sigma_surround_%s'%condition].value, hrf_params)[0] * params['amp_surround_%s'%condition].value\n\n\t\treturn time_course - combined_model_prediction\n\n\t#########################################################################################################################################################################################################################\n\t#### evalute fit\n\t#########################################################################################################################################################################################################################\n\n\t# optimize parameters\n\tminimize(residual, params, args=(), kws={},method='powell')\n\n\t#########################################################################################################################################################################################################################\n\t#### Recreate resulting predictions and PRFs with optimized parameters\n\t#########################################################################################################################################################################################################################\n\n\t# initiate model prediction at baseline value\n\tcombined_model_prediction = np.ones_like(time_course) * params['baseline'].value\n\n\t# now loop over conditions, create prediction and add to total prediction\n\tmodel_predictions = {}\n\tfor ci,condition in enumerate(conditions):\n\t\tthis_center_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, \n\t\t\tparams['sigma_center_%s'%condition].value,hrf_params)[0] * params['amp_center_%s'%condition].value\n\t\tthis_surround_model_prediction = gpfs[condition].hrf_model_prediction(params['xo_%s'%condition].value, params['yo_%s'%condition].value, 
\n\t\t\tparams['sigma_surround_%s'%condition].value, hrf_params)[0] * params['amp_surround_%s'%condition].value\n\t\tmodel_predictions[condition] = this_center_model_prediction + this_surround_model_prediction\n\t\tcombined_model_prediction += model_predictions[condition]\n\n\t\t# recreate PRFs\n\t\tthis_center_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,\n\t\t\tparams['sigma_center_%s'%condition].value) * params['amp_center_%s'%condition].value\n\t\tthis_surround_PRF = gpfs[condition].twoD_Gaussian(params['xo_%s'%condition].value, params['yo_%s'%condition].value,\n\t\t\tparams['sigma_surround_%s'%condition].value) * params['amp_surround_%s'%condition].value\n\t\tPRFs[condition] = this_center_PRF + this_surround_PRF\n\n\n\t#########################################################################################################################################################################################################################\n\t#### Get fit diagnostics\n\t#########################################################################################################################################################################################################################\n\n\t# add fwhm, necessary when fitting DoG\n\treconstruction_radius = 10\n\tthis_ssr = 1000 \n\tt = np.linspace(-reconstruction_radius,reconstruction_radius,this_ssr*reconstruction_radius)\n\t\n\tfwhms = {}\n\tsurround_sizes = {}\n\tfor condition in conditions:\n\t\tPRF_2D = params['amp_center_%s'%condition].value * np.exp(-t**2/(2*params['sigma_center_%s'%condition].value**2)) + params['amp_surround_%s'%condition].value * np.exp(-t**2/(2*(params['sigma_surround_%s'%condition].value)**2))\n\t\t## then, we fit a spline through this line, and get the roots (the fwhm points) of the spline:\n\t\tspline=interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D-np.max(PRF_2D)/2,s=0)\n\t\t## and compute the distance between them\n\t\ttry:\n\t\t\tfwhms[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]\n\t\texcept:\n\t\t\t## when this procedure fails, set fwhm to 0:\n\t\t\tfwhms[condition] = 0\n\t\t\n\t\t## now find the surround size in the same way\n\t\tif (model == 'OG') + (params['amp_surround_%s'%condition].value == 0):\n\t\t\tsurround_sizes[condition] = 0\n\t\telse:\n\t\t\tspline = interpolate.UnivariateSpline(range(len(PRF_2D)),PRF_2D+np.min(PRF_2D),s=0)\n\t\t\tsurround_sizes[condition] = ((np.diff(spline.roots())/len(t)*reconstruction_radius) * stim_radius)[0]\n\n\t## EVALUATE OVERALL MODEL FIT QUALITY\n\tstats = {}\n\tstats['spearman'] = spearmanr(time_course,combined_model_prediction)[0]\n\tstats['pearson'] = pearsonr(time_course,combined_model_prediction)[0]\n\tstats['RSS'] = np.sum((time_course - combined_model_prediction)**2)\n\tstats['r_squared'] = 1 - stats['RSS']/np.sum((time_course - np.mean(time_course)) ** 2) \n\tstats['kendalls_tau'] = kendalltau(time_course,combined_model_prediction)[0]\n\n\t## CREATE SEPERATE RESULTS DICT PER CONDITION\n\tresults = {}\n\tfor condition in conditions:\n\t\tresults[condition] = {}\n\t\tresults[condition]['baseline'] = params['baseline'].value\n\t\t# params from fit\n\t\tfor key in params.keys():\n\t\t\tif condition in key:\n\t\t\t\tif condition in key:\n\t\t\t\t\t# leave out the condition in the keys (as the results frames are identical across conditions)\n\t\t\t\t\tnew_key = key[:-len(condition)-1]\n\t\t\t\telse:\n\t\t\t\t\tnew_key = key\n\t\t\t\tresults[condition][new_key] = 
params[key].value\n\n\t\tresults[condition]['ecc'] = np.linalg.norm([params['xo_%s'%condition].value,params['yo_%s'%condition].value]) * stim_radius\n\t\tresults[condition]['sigma_center'] *= stim_radius\n\t\tresults[condition]['sigma_surround'] *= stim_radius\n\n\t\t# derived params\n\t\tresults[condition]['polar'] = np.arctan2(params['yo_%s'%condition].value,params['xo_%s'%condition].value)\n\t\tresults[condition]['fwhm'] = fwhms[condition]\n\t\tresults[condition]['surround_size'] = surround_sizes[condition]\n\t\tresults[condition]['SI'] = ((params['amp_surround_%s'%condition].value * (params['sigma_surround_%s'%condition].value**2) ) \n\t\t\t/ (params['amp_center_%s'%condition].value * (params['sigma_center_%s'%condition].value**2) ))\n\t\t\n\t\t# if the resulting PRF falls outside of the stimulus radius,\n\t\t# set the multiplier here to 0 so that it falls off the retmaps\n\t\tif results[condition]['ecc'] < (stim_radius):\n\t\t\tmultiplier = stats['r_squared']\n\t\telse:\n\t\t\tmultiplier = 0.001\n\n\t\t# here for only voxels within stim region:\n\t\tresults[condition]['real_polar_stim_region'] = np.cos(results[condition]['polar'])*np.arctanh(multiplier)\n\t\tresults[condition]['imag_polar_stim_region'] = np.sin(results[condition]['polar'])*np.arctanh(multiplier)\n\t\t\n\t\t# and for all voxels:\n\t\tresults[condition]['real_polar'] = np.cos(results[condition]['polar'])*np.arctanh(stats['r_squared'])\n\t\tresults[condition]['imag_polar'] = np.sin(results[condition]['polar'])*np.arctanh(stats['r_squared'])\n\n\n\t#########################################################################################################################################################################################################################\n\t#### Plot results\n\t#########################################################################################################################################################################################################################\n\n\tif plotbool * (stats['r_squared']>0.0):# (np.random.randint(10)<10):#* (stats['r_squared']>0.1):#(stats['r_squared']>0.1):# * :# :#* (results['ecc'] < 3) :#:# * * randint ) #* :#* )\n\n\t\tn_TRs = n_TRs[0]\n\t\tn_runs = int(len(time_course) / n_TRs)\n\t\tif fit_on_all_data:\n\t\t\tplot_conditions = ['Ridge','All']\n\t\telse:\n\t\t\tplot_conditions = conditions + ['All_fit']\n\t\tplot_dir = os.path.join(plotdir, '%s'%roi)\n\t\tif not os.path.isdir(plot_dir): os.mkdir(plot_dir)\n\n\t\tf=pl.figure(figsize=(20,8)); rowi = (n_runs+4)\n\n\t\timport colorsys\n\t\tcolors = np.array([colorsys.hsv_to_rgb(c,0.6,0.9) for c in np.linspace(0,1,3+1)])[:-1]\n\n\t\tfor runi in range(n_runs):\n\t\t\ts = f.add_subplot(rowi,1,runi+1)\n\t\t\tpl.plot(time_course[n_TRs*runi:n_TRs*(runi+1)],'-ok',linewidth=0.75,markersize=2.5)#,label='data'\n\t\t\tif not fit_on_all_data:\n\t\t\t\tfor ci, condition in enumerate(conditions):\n\t\t\t\t\tpl.plot(model_predictions[condition][n_TRs*runi:n_TRs*(runi+1)]+params['baseline'].value,color=colors[ci],label='%s model'%condition,linewidth=2)\t\t\t\t\n\t\t\t\tpl.plot([0,n_TRs],[params['baseline'].value,params['baseline'].value],color=colors[0],linewidth=1)\t\n\t\t\telse:\n\t\t\t\tpl.plot(combined_model_prediction[n_TRs*runi:n_TRs*(runi+1)],color=colors[0],label='model',linewidth=2)\t\n\t\t\tsn.despine(offset=10)\n\t\t\tpl.xlim(0,850)\n\t\t\tif runi == (n_runs-1):\n\t\t\t\tpl.xlabel('TRs')\n\t\t\telse:\n\t\t\t\tpl.xticks([])\n\t\t\tif runi == (n_runs/2):\n\t\t\t\tpl.legend(loc='best',fontsize=8)\n\t\t\t\tif 
'psc' in postFix:\n\t\t\t\t\tpl.ylabel('% signal change')\n\t\t\t\telse:\n\t\t\t\t\tpl.ylabel('unkown unit')\t\n\t\t\tpl.yticks([int(np.min(time_course)),0,int(np.max(time_course))])\t\n\t\t\tpl.ylim([int(np.min(time_course)),int(np.max(time_course))])\n\n\n\t\trowi = (n_runs+2)/2\n\t\tk = 0\n\t\tfor ci, condition in enumerate(plot_conditions):\n\t\t\tk+= 1\n\t\t\ts = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k,aspect='equal')\n\t\t\tpl.imshow(PRFs[condition],origin='lowerleft',interpolation='nearest',cmap=cm.coolwarm)\n\n\t\t\tpl.axis('off')\n\t\t\ts.set_title('%s PRF'%condition)\n\t\t\t\n\t\t\tk+= 1\n\t\t\tif not (condition == 'Ridge') + (condition == 'All_fit'):\n\t\t\t\ts = f.add_subplot(rowi,len(plot_conditions)*2,(rowi-1)*len(plot_conditions)*2+k)\n\t\t\t\tpl.imshow(np.ones((n_pixel_elements_raw,n_pixel_elements_raw)),cmap='gray')\n\t\t\t\tpl.clim(0,1)\n\t\t\t\tif model == 'OG':\n\t\t\t\t\ts.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, \"\\n%s PARAMETERS: \\n\\nbaseline: %.2f\\nsize: %.2f\\namplitude: %.6f\\n\\n\\nDERIVED QUANTIFICATIONS: \\n\\nr-squared: %.2f\\necc: %.2f\\nFWHM: %.2f\"%\n\t\t\t\t\t\t(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['amp_center'],\n\t\t\t\t\t\t\tstats['r_squared'],results[condition]['ecc'],results[condition]['fwhm']),\n\t\t\t\t\t\thorizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})\n\t\t\t\telif model == 'DoG':\n\t\t\t\t\ts.text(n_pixel_elements_raw/2,n_pixel_elements_raw/2, \"\\n%s PARAMETERS: \\n\\nbaseline: %.2f\\nsd center: %.2f\\nsd surround: %.2f\\namp center: %.6f\\namp surround: %.6f\\n\\nDERIVED QUANTIFICATIONS: \\n\\nr squared: %.2f\\necc: %.2f\\nFWHM: %.2f\\nsurround size: %.2f\\nsupression index: %.2f\"\n\t\t\t\t\t\t%(condition,results[condition]['baseline'],results[condition]['sigma_center'],results[condition]['sigma_surround'],results[condition]['amp_center'],\n\t\t\t\t\t\tresults[condition]['amp_surround'],stats['r_squared'],results[condition]['ecc'],results[condition]['fwhm'],results[condition]['surround_size'],\n\t\t\t\t\t\tresults[condition]['SI']),horizontalalignment='center',verticalalignment='center',fontsize=10,bbox={'facecolor':'white', 'alpha':1, 'pad':10})\n\t\t\t\tpl.axis('off')\n\n\t\t# pl.tight_layout()\n\t\tpl.savefig(os.path.join(plot_dir, 'vox_%d_%d_%d.pdf'%(slice_no,voxno,n_pixel_elements_raw)))\n\t\tpl.close()\n\n\treturn results, stats\n\n\n\t\n" ]
[ [ "numpy.arctanh", "numpy.linspace", "numpy.arctan2", "numpy.max", "numpy.mean", "scipy.stats.spearmanr", "numpy.exp", "numpy.ones_like", "numpy.reshape", "numpy.sin", "numpy.repeat", "numpy.zeros", "scipy.ndimage.measurements.maximum_position", "scipy.signal.fftconvolve", "numpy.min", "scipy.stats.pearsonr", "sklearn.linear_model.Ridge", "numpy.log10", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.cos", "numpy.ones", "scipy.stats.kendalltau" ] ]
sahelahmd/Facial-Recognition-Tracking-Computer-Vision-Python
[ "5e4e19087cf4ab988d4bebae6127d775292675e3" ]
[ "IrisTrackingRectDLIB.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport dlib\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n#refer to the 68-face-landmarks-labeled-by-dlib-software-automatically.png to understand why certain coordinates are used to find certain parts of the face\r\n\r\ndetector = dlib.get_frontal_face_detector() #front face classifier\r\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\") #assigned coordinates of the face by DLIB\r\n\r\nwhile True:\r\n ret, frame = cap.read() #return status variable and the captured image\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #grayscale image for detection filtering of eyes\r\n\r\n faces = detector(gray) #array of all faces\r\n for face in faces:\r\n x, y = face.left(), face.top() # Top Left coordinates of face in window\r\n x1, y1 = face.right(), face.bottom() # Bottom right coordinates of face in windows\r\n cv2.rectangle(frame, (x,y), (x1,y1), (0,255,0), 2) # form a rectangle based on previous two coordinates\r\n\r\n poslandmarkpoints = predictor(gray, face)\r\n\r\n # LEFT EYE TRACKING WITH DLIB\r\n # Using the DLIB landmarks diagram, coordinates 38 will be used for length of box\r\n # Width\r\n #(poslandmarkpoints.part(38).x, poslandmarkpoints.part(38).y) coordinate for left most eye to determine width of eye rectangle\r\n \r\n # Using the DLIB landmarks diagram, coordinates 41 will be used for height of box\r\n # Height\r\n #(poslandmarkpoints.part(41).x, poslandmarkpoints.part(41).y) first coordinate for left most eye to determine width of eye rectangle\r\n\r\n #x1,y1 ------\r\n #| |\r\n #| |\r\n #| |\r\n #--------x2,y2\r\n \r\n left_eye1x = poslandmarkpoints.part(38).x\r\n left_eye1y = poslandmarkpoints.part(38).y\r\n left_eye2x = poslandmarkpoints.part(41).x\r\n left_eye2y = poslandmarkpoints.part(41).y\r\n \r\n\r\n \r\n \r\n # RIGHT EYE TRACKING WITH DLIB\r\n # Using the DLIB landmarks diagram, coordinates 44 will be used for length of box\r\n # Width\r\n #(poslandmarkpoints.part(44).x, poslandmarkpoints.part(44).y) coordinate for left most eye to determine width of eye rectangle\r\n \r\n # Using the DLIB landmarks diagram, coordinates 47 will be used for height of box\r\n # Height\r\n #(poslandmarkpoints.part(47).x, poslandmarkpoints.part(47).y) first coordinate for left most eye to determine width of eye rectangle\r\n\r\n \r\n #x1,y1 ------\r\n #| |\r\n #| |\r\n #| |\r\n #--------x2,y2\r\n \r\n \r\n right_eye1x = poslandmarkpoints.part(44).x\r\n right_eye1y = poslandmarkpoints.part(44).y\r\n right_eye2x = poslandmarkpoints.part(47).x\r\n right_eye2y = poslandmarkpoints.part(47).y\r\n\r\n \r\n\r\n # ================================ tracking left eye on new window ================================\r\n \r\n #Left Eye Tracking\r\n leftEyeTrack = np.array([(right_eye1x-40,right_eye1y-20),\r\n (right_eye1x-40,right_eye2y+20),\r\n (right_eye2x+40,right_eye2y-20),\r\n (right_eye2x+40,right_eye2y+20)\r\n ],\r\n np.int32)\r\n \r\n\r\n \r\n #Right Eye Tracking\r\n rightEyeTrack = np.array([(left_eye1x-40,left_eye1y-20),\r\n (left_eye1x-40,left_eye2y+20),\r\n (left_eye2x+40,left_eye2y-20),\r\n (left_eye2x+40,left_eye2y+20)\r\n ],\r\n np.int32)\r\n\r\n lemin_x = np.min(leftEyeTrack[:, 0])\r\n lemax_x = np.max(leftEyeTrack[:, 0])\r\n lemin_y = np.min(leftEyeTrack[:, 1])\r\n lemax_y = np.max(leftEyeTrack[:, 1])\r\n\r\n left_eye = frame[lemin_y : lemax_y, lemin_x : lemax_x]\r\n left_eye = cv2.resize(left_eye, None, fx = 5, fy = 5) #fx and fy is the scale factor for frame\r\n cv2.imshow(\"Left Eye\", left_eye)\r\n\r\n # 
================================ tracking left eye on new window ================================\r\n\r\n \r\n # ================================ tracking right eye on new window ================================\r\n\r\n remin_x = np.min(rightEyeTrack[:, 0])\r\n remax_x = np.max(rightEyeTrack[:, 0])\r\n remin_y = np.min(rightEyeTrack[:, 1])\r\n remax_y = np.max(rightEyeTrack[:, 1])\r\n\r\n right_eye = frame[remin_y : remax_y, remin_x : remax_x]\r\n right_eye = cv2.resize(right_eye, None, fx = 5, fy = 5) #fx and fy is the scale factor for frame\r\n cv2.imshow(\"Right Eye\", right_eye)\r\n\r\n # ================================ tracking right eye on new window ================================\r\n\r\n #draw rectangle after eye frame is on window to prevent drawn polys from showing in eye window \r\n cv2.rectangle(frame, (right_eye1x-40,right_eye1y-20), (right_eye2x+40,right_eye2y+20), (0,255,0), 2)\r\n cv2.rectangle(frame, (left_eye1x-40,left_eye1y-20), (left_eye2x+40,left_eye2y+20), (0,255,0), 2)\r\n \r\n cv2.imshow(\"IrisTrackingRectDLIB\", frame)\r\n key = cv2.waitKey(1)\r\n if key == 27: #esc key is pressed\r\n break\r\n\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n" ]
[ [ "numpy.max", "numpy.array", "numpy.min" ] ]
showkeyjar/AutoMakeHuman
[ "d7a0f0b093937129567332bfecadb450a2b8db2e" ]
[ "test/keras-rl/agent5.py" ]
[ "from __future__ import division\nimport argparse\n\nfrom PIL import Image\nimport numpy as np\nimport gym\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Convolution2D, Permute\nfrom keras.optimizers import Adam\nimport keras.backend as K\n\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import LinearAnnealedPolicy, BoltzmannQPolicy, EpsGreedyQPolicy\nfrom rl.memory import SequentialMemory\nfrom rl.core import Processor\nfrom rl.callbacks import FileLogger, ModelIntervalCheckpoint\n\n\nINPUT_SHAPE = (84, 84)\nWINDOW_LENGTH = 4\n\n\nclass AtariProcessor(Processor):\n def process_observation(self, observation):\n assert observation.ndim == 3 # (height, width, channel)\n img = Image.fromarray(observation)\n img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale\n processed_observation = np.array(img)\n assert processed_observation.shape == INPUT_SHAPE\n return processed_observation.astype('uint8') # saves storage in experience memory\n\n def process_state_batch(self, batch):\n # We could perform this processing step in `process_observation`. In this case, however,\n # we would need to store a `float32` array instead, which is 4x more memory intensive than\n # an `uint8` array. This matters if we store 1M observations.\n processed_batch = batch.astype('float32') / 255.\n return processed_batch\n\n def process_reward(self, reward):\n return np.clip(reward, -1., 1.)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', choices=['train', 'test'], default='train')\nparser.add_argument('--env-name', type=str, default='BreakoutDeterministic-v4')\nparser.add_argument('--weights', type=str, default=None)\nargs = parser.parse_args()\n\n# Get the environment and extract the number of actions.\nenv = gym.make(args.env_name)\nnp.random.seed(123)\nenv.seed(123)\nnb_actions = env.action_space.n\n\n# Next, we build our model. We use the same model that was described by Mnih et al. (2015).\ninput_shape = (WINDOW_LENGTH,) + INPUT_SHAPE\nmodel = Sequential()\nif K.image_dim_ordering() == 'tf':\n # (width, height, channels)\n model.add(Permute((2, 3, 1), input_shape=input_shape))\nelif K.image_dim_ordering() == 'th':\n # (channels, width, height)\n model.add(Permute((1, 2, 3), input_shape=input_shape))\nelse:\n raise RuntimeError('Unknown image_dim_ordering.')\nmodel.add(Convolution2D(32, 8, 8, subsample=(4, 4)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64, 4, 4, subsample=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64, 3, 3, subsample=(1, 1)))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dense(nb_actions))\nmodel.add(Activation('linear'))\nprint(model.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nmemory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)\nprocessor = AtariProcessor()\n\n# Select a policy. We use eps-greedy action selection, which means that a random action is selected\n# with probability eps. We anneal eps from 1.0 to 0.1 over the course of 1M steps. This is done so that\n# the agent initially explores the environment (high eps) and then gradually sticks to what it knows\n# (low eps). We also set a dedicated eps value that is used during testing. Note that we set it to 0.05\n# so that the agent still performs some random actions. 
This ensures that the agent cannot get stuck.\npolicy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,\n nb_steps=1000000)\n\n# The trade-off between exploration and exploitation is difficult and an on-going research topic.\n# If you want, you can experiment with the parameters or use a different policy. Another popular one\n# is Boltzmann-style exploration:\n# policy = BoltzmannQPolicy(tau=1.)\n# Feel free to give it a try!\n\ndqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,\n processor=processor, nb_steps_warmup=50000, gamma=.99, target_model_update=10000,\n train_interval=4, delta_clip=1.)\ndqn.compile(Adam(lr=.00025), metrics=['mae'])\n\nif args.mode == 'train':\n # Okay, now it's time to learn something! We capture the interrupt exception so that training\n # can be prematurely aborted. Notice that you can the built-in Keras callbacks!\n weights_filename = 'dqn_{}_weights.h5f'.format(args.env_name)\n checkpoint_weights_filename = 'dqn_' + args.env_name + '_weights_{step}.h5f'\n log_filename = 'dqn_{}_log.json'.format(args.env_name)\n callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=250000)]\n callbacks += [FileLogger(log_filename, interval=100)]\n dqn.fit(env,visualize=True, callbacks=callbacks, nb_steps=1750000, log_interval=10000)\n\n # After training is done, we save the final weights one more time.\n dqn.save_weights(weights_filename, overwrite=True)\n\n # Finally, evaluate our algorithm for 10 episodes.\n dqn.test(env, nb_episodes=10, visualize=False)\nelif args.mode == 'test':\n weights_filename = 'dqn_{}_weights.h5f'.format(args.env_name)\n if args.weights:\n weights_filename = args.weights\n dqn.load_weights(weights_filename)\n dqn.test(env, nb_episodes=10, visualize=True)\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.clip" ] ]
franckbrl/tensor2tensor
[ "b9b9af746e7473b1d36a640e96aff3283360bf87" ]
[ "tensor2tensor/utils/decoding.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Decoding utilities.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport operator\nimport os\n\n# Dependency imports\n\nimport numpy as np\nimport six\n\nfrom six.moves import input # pylint: disable=redefined-builtin\n\nfrom tensor2tensor.data_generators import problem as problem_lib\nfrom tensor2tensor.data_generators import text_encoder\nimport tensorflow as tf\n\nFLAGS = tf.flags.FLAGS\n\n# Number of samples to draw for an image input (in such cases as captioning)\nIMAGE_DECODE_LENGTH = 100\n\n\ndef decode_hparams(overrides=\"\"):\n \"\"\"Hyperparameters for decoding.\"\"\"\n hp = tf.contrib.training.HParams(\n save_images=False,\n log_targets=True,\n extra_length=100,\n batch_size=0,\n beam_size=4,\n alpha=0.6,\n return_beams=False,\n write_beam_scores=False,\n max_input_size=-1,\n identity_output=False,\n num_samples=-1,\n delimiter=\"\\n\")\n hp = hp.parse(overrides)\n return hp\n\n\ndef log_decode_results(inputs,\n outputs,\n problem_name,\n prediction_idx,\n inputs_vocab,\n targets_vocab,\n targets=None,\n save_images=False,\n model_dir=None,\n identity_output=False,\n log_targets=True):\n \"\"\"Log inference results.\"\"\"\n is_image = \"image\" in problem_name\n decoded_inputs = None\n if is_image and save_images:\n save_path = os.path.join(model_dir, \"%s_prediction_%d.jpg\" %\n (problem_name, prediction_idx))\n show_and_save_image(inputs / 255., save_path)\n elif inputs_vocab:\n if identity_output:\n decoded_inputs = \" \".join(map(str, inputs.flatten()))\n else:\n decoded_inputs = inputs_vocab.decode(_save_until_eos(inputs, is_image))\n\n tf.logging.info(\"Inference results INPUT: %s\" % decoded_inputs)\n\n decoded_targets = None\n decoded_outputs = None\n if identity_output:\n decoded_outputs = \" \".join(map(str, outputs.flatten()))\n if targets is not None:\n decoded_targets = \" \".join(map(str, targets.flatten()))\n else:\n decoded_outputs = targets_vocab.decode(_save_until_eos(outputs, is_image))\n if targets is not None and log_targets:\n decoded_targets = targets_vocab.decode(_save_until_eos(targets, is_image))\n\n tf.logging.info(\"Inference results OUTPUT: %s\" % decoded_outputs)\n if targets is not None and log_targets:\n tf.logging.info(\"Inference results TARGET: %s\" % decoded_targets)\n return decoded_inputs, decoded_outputs, decoded_targets\n\n\ndef decode_from_dataset(estimator,\n problem_name,\n hparams,\n decode_hp,\n decode_to_file=None,\n dataset_split=None):\n \"\"\"Perform decoding from dataset.\"\"\"\n tf.logging.info(\"Performing local inference from dataset for %s.\",\n str(problem_name))\n # We assume that worker_id corresponds to shard number.\n shard = decode_hp.shard_id if decode_hp.shards > 1 else None\n\n # If decode_hp.batch_size is specified, use a fixed batch size\n if decode_hp.batch_size:\n hparams.batch_size = decode_hp.batch_size\n 
hparams.use_fixed_batch_size = True\n\n dataset_kwargs = {\n \"shard\": shard,\n \"dataset_split\": dataset_split,\n \"max_records\": decode_hp.num_samples\n }\n\n # Build the inference input function\n problem = hparams.problem\n infer_input_fn = problem.make_estimator_input_fn(\n tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)\n\n # Get the predictions as an iterable\n predictions = estimator.predict(infer_input_fn)\n\n # Prepare output file writers if decode_to_file passed\n if decode_to_file:\n if decode_hp.shards > 1:\n decode_filename = decode_to_file + (\"%.2d\" % decode_hp.shard_id)\n else:\n decode_filename = decode_to_file\n output_filepath = _decode_filename(decode_filename, problem_name,\n decode_hp)\n parts = output_filepath.split(\".\")\n parts[-1] = \"targets\"\n target_filepath = \".\".join(parts)\n parts[-1] = \"inputs\"\n input_filepath = \".\".join(parts)\n\n output_file = tf.gfile.Open(output_filepath, \"w\")\n target_file = tf.gfile.Open(target_filepath, \"w\")\n input_file = tf.gfile.Open(input_filepath, \"w\")\n\n problem_hparams = hparams.problem_hparams\n # Inputs vocabulary is set to targets if there are no inputs in the problem,\n # e.g., for language models where the inputs are just a prefix of targets.\n has_input = \"inputs\" in problem_hparams.vocabulary\n inputs_vocab_key = \"inputs\" if has_input else \"targets\"\n inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]\n targets_vocab = problem_hparams.vocabulary[\"targets\"]\n for num_predictions, prediction in enumerate(predictions):\n num_predictions += 1\n inputs = prediction[\"inputs\"]\n targets = prediction[\"targets\"]\n outputs = prediction[\"outputs\"]\n\n # Log predictions\n decoded_outputs = []\n decoded_scores = []\n if decode_hp.return_beams:\n output_beams = np.split(outputs, decode_hp.beam_size, axis=0)\n scores = None\n if \"scores\" in prediction:\n scores = np.split(prediction[\"scores\"], decode_hp.beam_size, axis=0)\n for i, beam in enumerate(output_beams):\n tf.logging.info(\"BEAM %d:\" % i)\n score = scores and scores[i]\n decoded = log_decode_results(\n inputs,\n beam,\n problem_name,\n num_predictions,\n inputs_vocab,\n targets_vocab,\n save_images=decode_hp.save_images,\n model_dir=estimator.model_dir,\n identity_output=decode_hp.identity_output,\n targets=targets,\n log_targets=decode_hp.log_targets)\n decoded_outputs.append(decoded)\n if decode_hp.write_beam_scores:\n decoded_scores.append(score)\n else:\n decoded = log_decode_results(\n inputs,\n outputs,\n problem_name,\n num_predictions,\n inputs_vocab,\n targets_vocab,\n save_images=decode_hp.save_images,\n model_dir=estimator.model_dir,\n identity_output=decode_hp.identity_output,\n targets=targets,\n log_targets=decode_hp.log_targets)\n decoded_outputs.append(decoded)\n\n # Write out predictions if decode_to_file passed\n if decode_to_file:\n for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):\n beam_score_str = \"\"\n if decode_hp.write_beam_scores:\n beam_score_str = \"\\t%.2f\" % decoded_scores[i]\n output_file.write(\n str(d_output) + beam_score_str + decode_hp.delimiter)\n target_file.write(str(d_target) + decode_hp.delimiter)\n input_file.write(str(d_input) + decode_hp.delimiter)\n\n if (decode_hp.num_samples >= 0 and\n num_predictions >= decode_hp.num_samples):\n break\n\n if decode_to_file:\n output_file.close()\n target_file.close()\n input_file.close()\n\n tf.logging.info(\"Completed inference on %d samples.\" % num_predictions) # pylint: 
disable=undefined-loop-variable\n\n\ndef decode_from_file(estimator,\n filename,\n filename_sfeats,\n hparams,\n decode_hp,\n decode_to_file=None,\n checkpoint_path=None):\n \"\"\"Compute predictions on entries in filename and write them out.\"\"\"\n if not decode_hp.batch_size:\n decode_hp.batch_size = 32\n tf.logging.info(\n \"decode_hp.batch_size not specified; default=%d\" % decode_hp.batch_size)\n\n # Inputs vocabulary is set to targets if there are no inputs in the problem,\n # e.g., for language models where the inputs are just a prefix of targets.\n p_hp = hparams.problem_hparams\n has_input = \"inputs\" in p_hp.vocabulary\n inputs_vocab_key = \"inputs\" if has_input else \"targets\"\n inputs_vocab = p_hp.vocabulary[inputs_vocab_key]\n targets_vocab = p_hp.vocabulary[\"targets\"]\n sfeats_vocabs = []\n if filename_sfeats:\n for key in sorted(p_hp.vocabulary.keys()):\n if key.startswith(\"sfeats\"):\n sfeats_vocabs.append(p_hp.vocabulary[key])\n problem_name = FLAGS.problem\n\n tf.logging.info(\"Performing decoding from a file.\")\n sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.shards,\n decode_hp.delimiter)\n sorted_sfeats = []\n if filename_sfeats:\n sorted_sfeats = _get_sorted_sfeats(filename_sfeats, sorted_keys,\n decode_hp.shards, decode_hp.delimiter)\n num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1\n\n def input_fn():\n if filename_sfeats:\n input_gen = _decode_batch_sfeat_fn(\n p_hp, num_decode_batches, sorted_inputs,\n sorted_sfeats, inputs_vocab, sfeats_vocabs,\n decode_hp.batch_size, decode_hp.max_input_size)\n gen_fn = make_input_fn_from_generator(input_gen)\n example = gen_fn()\n return _decode_sfeat_tensor_to_features_dict(example, hparams)\n else:\n input_gen = _decode_batch_input_fn(\n num_decode_batches, sorted_inputs, inputs_vocab,\n decode_hp.batch_size, decode_hp.max_input_size)\n gen_fn = make_input_fn_from_generator(input_gen)\n example = gen_fn()\n return _decode_input_tensor_to_features_dict(example, hparams)\n\n decodes = []\n result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)\n for result in result_iter:\n if decode_hp.return_beams:\n beam_decodes = []\n beam_scores = []\n output_beams = np.split(result[\"outputs\"], decode_hp.beam_size, axis=0)\n scores = None\n if \"scores\" in result:\n scores = np.split(result[\"scores\"], decode_hp.beam_size, axis=0)\n for k, beam in enumerate(output_beams):\n tf.logging.info(\"BEAM %d:\" % k)\n score = scores and scores[k]\n _, decoded_outputs, _ = log_decode_results(result[\"inputs\"], beam,\n problem_name, None,\n inputs_vocab, targets_vocab)\n beam_decodes.append(decoded_outputs)\n if decode_hp.write_beam_scores:\n beam_scores.append(score)\n if decode_hp.write_beam_scores:\n decodes.append(\"\\t\".join(\n [\"\\t\".join([d, \"%.2f\" % s]) for d, s\n in zip(beam_decodes, beam_scores)]))\n else:\n decodes.append(\"\\t\".join(beam_decodes))\n else:\n _, decoded_outputs, _ = log_decode_results(\n result[\"inputs\"], result[\"outputs\"], problem_name,\n None, inputs_vocab, targets_vocab)\n decodes.append(decoded_outputs)\n\n # Reversing the decoded inputs and outputs because they were reversed in\n # _decode_batch_input_fn\n sorted_inputs.reverse()\n sorted_sfeats.reverse()\n decodes.reverse()\n # If decode_to_file was provided use it as the output filename without change\n # (except for adding shard_id if using more shards for decoding).\n # Otherwise, use the input filename plus model, hp, problem, beam, alpha.\n decode_filename = decode_to_file 
if decode_to_file else filename\n if decode_hp.shards > 1:\n decode_filename += \"%.2d\" % decode_hp.shard_id\n if not decode_to_file:\n decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)\n tf.logging.info(\"Writing decodes into %s\" % decode_filename)\n outfile = tf.gfile.Open(decode_filename, \"w\")\n for index in range(len(sorted_inputs)):\n outfile.write(\"%s%s\" % (decodes[sorted_keys[index]], decode_hp.delimiter))\n\n\ndef _decode_filename(base_filename, problem_name, decode_hp):\n return \"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes\".format(\n base=base_filename,\n model=FLAGS.model,\n hp=FLAGS.hparams_set,\n problem=problem_name,\n beam=str(decode_hp.beam_size),\n alpha=str(decode_hp.alpha))\n\n\ndef make_input_fn_from_generator(gen):\n \"\"\"Use py_func to yield elements from the given generator.\"\"\"\n first_ex = six.next(gen)\n flattened = tf.contrib.framework.nest.flatten(first_ex)\n types = [t.dtype for t in flattened]\n shapes = [[None] * len(t.shape) for t in flattened]\n first_ex_list = [first_ex]\n\n def py_func():\n if first_ex_list:\n example = first_ex_list.pop()\n else:\n example = six.next(gen)\n return tf.contrib.framework.nest.flatten(example)\n\n def input_fn():\n flat_example = tf.py_func(py_func, [], types)\n _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]\n example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)\n return example\n\n return input_fn\n\n\ndef decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None):\n \"\"\"Interactive decoding.\"\"\"\n\n def input_fn():\n gen_fn = make_input_fn_from_generator(\n _interactive_input_fn(hparams, decode_hp))\n example = gen_fn()\n example = _interactive_input_tensor_to_features_dict(example, hparams)\n return example\n\n result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)\n for result in result_iter:\n is_image = False # TODO(lukaszkaiser): find out from problem id / class.\n targets_vocab = hparams.problem_hparams.vocabulary[\"targets\"]\n\n if decode_hp.return_beams:\n beams = np.split(result[\"outputs\"], decode_hp.beam_size, axis=0)\n scores = None\n if \"scores\" in result:\n scores = np.split(result[\"scores\"], decode_hp.beam_size, axis=0)\n for k, beam in enumerate(beams):\n tf.logging.info(\"BEAM %d:\" % k)\n beam_string = targets_vocab.decode(_save_until_eos(beam, is_image))\n if scores is not None:\n tf.logging.info(\"\\\"%s\\\"\\tScore:%f\" % (beam_string, scores[k]))\n else:\n tf.logging.info(\"\\\"%s\\\"\" % beam_string)\n else:\n if decode_hp.identity_output:\n tf.logging.info(\" \".join(map(str, result[\"outputs\"].flatten())))\n else:\n tf.logging.info(\n targets_vocab.decode(_save_until_eos(result[\"outputs\"], is_image)))\n\n\ndef _decode_batch_input_fn(num_decode_batches, sorted_inputs,\n vocabulary, batch_size, max_input_size):\n tf.logging.info(\" batch %d\" % num_decode_batches)\n # First reverse all the input sentences so that if you're going to get OOMs,\n # you'll see it in the first batch\n sorted_inputs.reverse()\n for b in range(num_decode_batches):\n tf.logging.info(\"Decoding batch %d\" % b)\n batch_length = 0\n batch_inputs = []\n for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:\n input_ids = vocabulary.encode(inputs)\n if max_input_size > 0:\n # Subtract 1 for the EOS_ID.\n input_ids = input_ids[:max_input_size - 1]\n input_ids.append(text_encoder.EOS_ID)\n batch_inputs.append(input_ids)\n if len(input_ids) > batch_length:\n batch_length = 
len(input_ids)\n final_batch_inputs = []\n for input_ids in batch_inputs:\n assert len(input_ids) <= batch_length\n x = input_ids + [0] * (batch_length - len(input_ids))\n final_batch_inputs.append(x)\n\n yield {\n \"inputs\": np.array(final_batch_inputs).astype(np.int32),\n }\n\n\ndef _decode_batch_sfeat_fn(p_hp, num_decode_batches, sorted_inputs,\n sorted_sfeats, vocabulary, sfeats_vocabs, batch_size, max_input_size):\n tf.logging.info(\" batch %d\" % num_decode_batches)\n # First reverse all the input sentences so that if you're going to get OOMs,\n # you'll see it in the first batch\n sorted_inputs.reverse()\n sorted_sfeats.reverse()\n\n def _get_subword_tags(subword_nb):\n if subword_nb == 1:\n feat = ['O']\n else:\n feat = ['B', 'E']\n while len(feat) < subword_nb:\n feat.insert(1, 'I')\n return \" \".join(feat)\n\n for b in range(num_decode_batches):\n tf.logging.info(\"Decoding batch %d\" % b)\n batch_length = 0\n batch_inputs = []\n batch_sfeats = []\n for inputs, sfeats in zip(sorted_inputs[b * batch_size:(b + 1) * batch_size],\n sorted_sfeats[b * batch_size:(b + 1) * batch_size]):\n\n # decompose source features\n sfeat_delimiter = p_hp.sfeat_delimiter\n sfeats_split = [sf.split(sfeat_delimiter) for sf in sfeats.split()]\n sfeats_ids = []\n sfeats_vocabs_iter = enumerate(sfeats_vocabs)\n if p_hp.use_subword_tags:\n sfeats_vocabs_iter = enumerate(sfeats_vocabs[:-1])\n for sfeat_id, vocab in sfeats_vocabs_iter:\n feat = [sf[sfeat_id] for sf in sfeats_split]\n assert len(feat) == len(inputs.split()), \"Source word and feature sequences must have the same length\"\n feat = \" \".join(feat)\n sfeats_ids.append(vocab.encode(feat))\n\n # synchonize source features and words\n input_ids = []\n new_sfeats_ids = [[] for _ in sfeats_ids]\n if p_hp.use_subword_tags:\n new_sfeats_ids.append([])\n for idx, word in enumerate(inputs.split()):\n if p_hp.vocab_type == \"subwords\":\n ste_word = vocabulary.encode_without_tokenizing(word)\n elif p_hp.vocab_type == \"tokens\":\n ste_word = vocabulary.encode(word)\n else:\n raise ValueError(\"VocabType not supported\")\n for sfeat_idx, _ in enumerate(sfeats_ids):\n new_sfeats_ids[sfeat_idx] += [sfeats_ids[sfeat_idx][idx]] * len(ste_word)\n if p_hp.use_subword_tags:\n subword_tags = _get_subword_tags(len(ste_word))\n new_sfeats_ids[sfeat_idx+1] += sfeats_vocabs[-1].encode(subword_tags)\n input_ids += ste_word\n sfeats_ids = new_sfeats_ids\n\n if max_input_size > 0:\n # Subtract 1 for the EOS_ID.\n input_ids = input_ids[:max_input_size - 1]\n for sf_idx, _ in enumerate(sfeats_ids):\n sfeats_ids[sf_idx] = sfeats_ids[sf_idx][:max_input_size - 1]\n \n input_ids.append(text_encoder.EOS_ID)\n batch_inputs.append(input_ids)\n for idx, _ in enumerate(sfeats_ids):\n sfeats_ids[idx].append(text_encoder.EOS_ID)\n batch_sfeats.append(sfeats_ids)\n \n if len(input_ids) > batch_length:\n batch_length = len(input_ids)\n final_batch_inputs = []\n final_batch_sfeats = []\n for input_ids, sfeat_ids in zip(batch_inputs, batch_sfeats):\n assert len(input_ids) <= batch_length\n x = input_ids + [0] * (batch_length - len(input_ids))\n final_batch_inputs.append(x)\n b_sfeats = []\n for sfeat_idx, _ in enumerate(sfeat_ids):\n assert len(sfeat_ids[sfeat_idx]) <= batch_length\n x = sfeat_ids[sfeat_idx] + [0] * (batch_length - len(sfeat_ids[sfeat_idx]))\n b_sfeats.append(x)\n final_batch_sfeats.append(b_sfeats)\n\n out_dict = {\n \"inputs\": np.array(final_batch_inputs).astype(np.int32)\n }\n for idx, _ in enumerate(sfeats_vocabs):\n current_feat = [f[idx] for f in 
final_batch_sfeats]\n out_dict[\"sfeats.\"+str(idx)] = np.array(current_feat).astype(np.int32)\n yield out_dict\n\n\ndef _interactive_input_fn(hparams, decode_hp):\n \"\"\"Generator that reads from the terminal and yields \"interactive inputs\".\n\n Due to temporary limitations in tf.learn, if we don't want to reload the\n whole graph, then we are stuck encoding all of the input as one fixed-size\n numpy array.\n\n We yield int32 arrays with shape [const_array_size]. The format is:\n [num_samples, decode_length, len(input ids), <input ids>, <padding>]\n\n Args:\n hparams: model hparams\n decode_hp: decode hparams\n Yields:\n numpy arrays\n\n Raises:\n Exception: when `input_type` is invalid.\n \"\"\"\n num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1\n decode_length = decode_hp.extra_length\n input_type = \"text\"\n p_hparams = hparams.problem_hparams\n has_input = \"inputs\" in p_hparams.input_modality\n vocabulary = p_hparams.vocabulary[\"inputs\" if has_input else \"targets\"]\n # This should be longer than the longest input.\n const_array_size = 10000\n # Import readline if available for command line editing and recall.\n try:\n import readline # pylint: disable=g-import-not-at-top,unused-variable\n except ImportError:\n pass\n while True:\n prompt = (\"INTERACTIVE MODE num_samples=%d decode_length=%d \\n\"\n \" it=<input_type> ('text' or 'image' or 'label', default: \"\n \"text)\\n\"\n \" ns=<num_samples> (changes number of samples, default: 1)\\n\"\n \" dl=<decode_length> (changes decode length, default: 100)\\n\"\n \" <%s> (decode)\\n\"\n \" q (quit)\\n\"\n \">\" % (num_samples, decode_length, \"source_string\"\n if has_input else \"target_prefix\"))\n input_string = input(prompt)\n if input_string == \"q\":\n return\n elif input_string[:3] == \"ns=\":\n num_samples = int(input_string[3:])\n elif input_string[:3] == \"dl=\":\n decode_length = int(input_string[3:])\n elif input_string[:3] == \"it=\":\n input_type = input_string[3:]\n else:\n if input_type == \"text\":\n input_ids = vocabulary.encode(input_string)\n if has_input:\n input_ids.append(text_encoder.EOS_ID)\n x = [num_samples, decode_length, len(input_ids)] + input_ids\n assert len(x) < const_array_size\n x += [0] * (const_array_size - len(x))\n features = {\n \"inputs\": np.array(x).astype(np.int32),\n }\n elif input_type == \"image\":\n input_path = input_string\n img = vocabulary.encode(input_path)\n features = {\n \"inputs\": img.astype(np.int32),\n }\n elif input_type == \"label\":\n input_ids = [int(input_string)]\n x = [num_samples, decode_length, len(input_ids)] + input_ids\n features = {\n \"inputs\": np.array(x).astype(np.int32),\n }\n else:\n raise Exception(\"Unsupported input type.\")\n for k, v in six.iteritems(\n problem_lib.problem_hparams_to_features(p_hparams)):\n features[k] = np.array(v).astype(np.int32)\n yield features\n\n\ndef show_and_save_image(img, save_path):\n try:\n import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top\n except ImportError as e:\n tf.logging.warning(\"Showing and saving an image requires matplotlib to be \"\n \"installed: %s\", e)\n raise NotImplementedError(\"Image display and save not implemented.\")\n plt.imshow(img)\n plt.savefig(save_path)\n\n\ndef _get_sorted_inputs(filename, num_shards=1, delimiter=\"\\n\"):\n \"\"\"Returning inputs sorted according to length.\n\n Args:\n filename: path to file with inputs, 1 per line.\n num_shards: number of input shards. 
If > 1, will read from file filename.XX,\n where XX is FLAGS.worker_id.\n delimiter: str, delimits records in the file.\n\n Returns:\n a sorted list of inputs\n\n \"\"\"\n tf.logging.info(\"Getting sorted inputs\")\n # read file and sort inputs according them according to input length.\n if num_shards > 1:\n decode_filename = filename + (\"%.2d\" % FLAGS.worker_id)\n else:\n decode_filename = filename\n\n with tf.gfile.Open(decode_filename) as f:\n text = f.read()\n records = text.split(delimiter)\n inputs = [record.strip() for record in records]\n # Strip the last empty line.\n if not inputs[-1]:\n inputs.pop()\n input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]\n sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))\n # We'll need the keys to rearrange the inputs back into their original order\n sorted_keys = {}\n sorted_inputs = []\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n sorted_keys[index] = i\n return sorted_inputs, sorted_keys\n\n\ndef _get_sorted_sfeats(filename, sorted_keys, num_shards=1, delimiter=\"\\n\"):\n \"\"\"Returning sfeats sorted like inputs.\n\n Args:\n filename: path to file with sfeats, 1 per line.\n num_shards: number of input shards. If > 1, will read from file filename.XX,\n where XX is FLAGS.worker_id.\n delimiter: str, delimits records in the file.\n\n Returns:\n a sorted list of sfeats\n\n \"\"\"\n tf.logging.info(\"Getting sorted sfeats\")\n # read file and sort inputs according them according to input length.\n if num_shards > 1:\n decode_filename = filename + (\"%.2d\" % FLAGS.worker_id)\n else:\n decode_filename = filename\n\n with tf.gfile.Open(decode_filename) as f:\n text = f.read()\n records = text.split(delimiter)\n sfeats = [record.strip() for record in records]\n # Strip the last empty line.\n if not sfeats[-1]:\n sfeats.pop()\n sorted_keys = sorted(sorted_keys.items(), key=operator.itemgetter(1))\n sorted_sfeats = []\n for index, _ in sorted_keys:\n sorted_sfeats.append(sfeats[index])\n return sorted_sfeats\n\n\ndef _save_until_eos(hyp, is_image):\n \"\"\"Strips everything after the first <EOS> token, which is normally 1.\"\"\"\n hyp = hyp.flatten()\n if is_image:\n return hyp\n try:\n index = list(hyp).index(text_encoder.EOS_ID)\n return hyp[0:index]\n except ValueError:\n # No EOS_ID: return the array as-is.\n return hyp\n\n\ndef _interactive_input_tensor_to_features_dict(feature_map, hparams):\n \"\"\"Convert the interactive input format (see above) to a dictionary.\n\n Args:\n feature_map: dict with inputs.\n hparams: model hyperparameters\n\n Returns:\n a features dictionary, as expected by the decoder.\n \"\"\"\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n input_is_image = False if len(inputs.get_shape()) < 3 else True\n\n x = inputs\n if input_is_image:\n x = tf.image.resize_images(x, [299, 299])\n x = tf.reshape(x, [1, 299, 299, -1])\n x = tf.to_int32(x)\n else:\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = (\n 
IMAGE_DECODE_LENGTH if input_is_image else inputs[1])\n features[\"inputs\"] = x\n return features\n\n\ndef _decode_input_tensor_to_features_dict(feature_map, hparams):\n \"\"\"Convert the interactive input format (see above) to a dictionary.\n\n Args:\n feature_map: dict with inputs.\n hparams: model hyperparameters\n\n Returns:\n a features dictionary, as expected by the decoder.\n \"\"\"\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n input_is_image = False\n\n x = inputs\n p_hparams = hparams.problem_hparams\n # Add a third empty dimension\n x = tf.expand_dims(x, axis=[2])\n x = tf.to_int32(x)\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = (\n IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)\n features[\"inputs\"] = x\n return features\n\n\ndef _decode_sfeat_tensor_to_features_dict(feature_map, hparams):\n \"\"\"Convert the interactive input format (see above) to a dictionary.\n\n Args:\n feature_map: dict with inputs and source features.\n hparams: model hyperparameters\n\n Returns:\n a features dictionary, as expected by the decoder.\n \"\"\"\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n src_features = [k for k in feature_map.keys() if k.startswith(\"sfeats\")]\n sfeats = {}\n for sfeat in src_features:\n sfeats[sfeat] = tf.convert_to_tensor(feature_map[sfeat])\n input_is_image = False\n\n x = inputs\n p_hparams = hparams.problem_hparams\n # Add a third empty dimension\n x = tf.expand_dims(x, axis=[2])\n x = tf.to_int32(x)\n for idx, _ in enumerate(sfeats):\n idx = str(idx)\n sfeats[\"sfeats.\"+idx] = tf.expand_dims(sfeats[\"sfeats.\"+idx], axis=[2])\n sfeats[\"sfeats.\"+idx] = tf.to_int32(sfeats[\"sfeats.\"+idx])\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = (\n IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)\n features[\"inputs\"] = x\n for idx, f in enumerate(sfeats):\n features[\"sfeats.\"+str(idx)] = sfeats[\"sfeats.\"+str(idx)]\n return features \n" ]
[ [ "tensorflow.convert_to_tensor", "matplotlib.pyplot.imshow", "numpy.split", "tensorflow.constant", "tensorflow.logging.warning", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.image.resize_images", "tensorflow.reshape", "tensorflow.expand_dims", "matplotlib.pyplot.savefig", "tensorflow.contrib.framework.nest.pack_sequence_as", "tensorflow.contrib.framework.nest.flatten", "tensorflow.logging.info", "tensorflow.to_int32", "numpy.array", "tensorflow.contrib.training.HParams", "tensorflow.py_func" ] ]
chenls/MegEngine
[ "5c775d02dd0b8f20b5acc6b400cf722e92f2e86b" ]
[ "imperative/python/test/integration/test_converge.py" ]
[ "# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport itertools\n\nimport numpy as np\n\nimport megengine as mge\nimport megengine.autodiff as ad\nimport megengine.functional as F\nfrom megengine import Tensor\nfrom megengine.module import Linear, Module\nfrom megengine.optimizer import SGD\n\nbatch_size = 64\ndata_shape = (batch_size, 2)\nlabel_shape = (batch_size,)\n\n\ndef minibatch_generator():\n while True:\n inp_data = np.zeros((batch_size, 2))\n label = np.zeros(batch_size, dtype=np.int32)\n for i in range(batch_size):\n # [x0, x1], sampled from U[-1, 1]\n inp_data[i, :] = np.random.rand(2) * 2 - 1\n label[i] = 0 if np.prod(inp_data[i]) < 0 else 1\n yield inp_data.astype(np.float32), label.astype(np.int32)\n\n\ndef calculate_precision(data: np.ndarray, pred: np.ndarray) -> float:\n \"\"\" Calculate precision for given data and prediction.\n\n :type data: [[x, y], ...]\n :param data: Input data\n :type pred: [[x_pred, y_pred], ...]\n :param pred: Network output data\n \"\"\"\n correct = 0\n assert len(data) == len(pred)\n for inp_data, pred_output in zip(data, pred):\n label = 0 if np.prod(inp_data) < 0 else 1\n pred_label = np.argmax(pred_output)\n if pred_label == label:\n correct += 1\n return float(correct) / len(data)\n\n\nclass XORNet(Module):\n def __init__(self):\n self.mid_layers = 14\n self.num_class = 2\n super().__init__()\n\n self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)\n self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)\n\n self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)\n\n def forward(self, x):\n x = self.fc0(x)\n x = F.tanh(x)\n x = self.fc1(x)\n x = F.tanh(x)\n x = self.fc2(x)\n return x\n\n\ndef test_training_converge():\n net = XORNet()\n opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n gm = ad.GradManager().attach(net.parameters())\n\n def train(data, label):\n with gm:\n pred = net(data)\n loss = F.nn.cross_entropy(pred, label)\n gm.backward(loss)\n return loss\n\n def infer(data):\n return net(data)\n\n train_dataset = minibatch_generator()\n losses = []\n\n for data, label in itertools.islice(train_dataset, 2000):\n data = Tensor(data, dtype=np.float32)\n label = Tensor(label, dtype=np.int32)\n opt.clear_grad()\n loss = train(data, label)\n opt.step()\n losses.append(loss.numpy())\n\n assert np.mean(losses[-100:]) < 0.1, \"Final training Loss must be low enough\"\n\n ngrid = 10\n x = np.linspace(-1.0, 1.0, ngrid)\n xx, yy = np.meshgrid(x, x)\n xx = xx.reshape((ngrid * ngrid, 1))\n yy = yy.reshape((ngrid * ngrid, 1))\n data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))\n\n pred = infer(data).numpy()\n precision = calculate_precision(data.numpy(), pred)\n assert precision == 1.0, \"Test precision must be high enough, get {}\".format(\n precision\n )\n" ]
[ [ "numpy.linspace", "numpy.concatenate", "numpy.argmax", "numpy.mean", "numpy.random.rand", "numpy.prod", "numpy.meshgrid", "numpy.zeros" ] ]
ai-in-motion/moai
[ "e38cac046c059d2e2331ef4883bbabc5a500a5cf" ]
[ "moai/visualization/visdom/pose2d.py" ]
[ "from moai.visualization.visdom.base import Base\nfrom moai.utils.arguments import ensure_string_list\nfrom moai.utils.iterators import pairwise\n\nimport torch\nimport visdom\nimport functools\nimport typing\nimport logging\nimport numpy as np\nimport cv2\nimport colour\nimport math\nimport toolz\nfrom PIL import Image\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"Pose2d\"]\n\nclass Pose2d(Base):\n def __init__(self,\n images: typing.Union[str, typing.Sequence[str]],\n poses: typing.Union[str, typing.Sequence[str]],\n gt: typing.Union[str, typing.Sequence[str]],\n pred: typing.Union[str, typing.Sequence[str]],\n gt_masks: typing.Union[str, typing.Sequence[str]],\n pred_masks: typing.Union[str, typing.Sequence[str]],\n pose_structure: typing.Union[str, typing.Sequence[str]],\n coords: typing.Union[str, typing.Sequence[str]],\n color_gt: typing.Union[str, typing.Sequence[str]],\n color_pred: typing.Union[str, typing.Sequence[str]],\n name: str=\"default\",\n ip: str=\"http://localhost\",\n port: int=8097,\n reverse_coords: bool=False,\n rotate_image: bool=False,\n transparency: float=0.4,\n scale: float=1.0,\n use_mask: bool=True,\n ):\n super(Pose2d, self).__init__(name, ip, port)\n self.images = ensure_string_list(images)\n self.poses = ensure_string_list(poses)\n self.gt = ensure_string_list(gt)\n self.pred = ensure_string_list(pred)\n self.gt_masks = ensure_string_list(gt_masks)\n self.pred_masks = ensure_string_list(pred_masks)\n self.pose_structure = ensure_string_list(pose_structure)\n self.coords = ensure_string_list(coords)\n self.color_gt = list(map(colour.web2rgb, ensure_string_list(color_gt)))\n self.color_pred = list(map(colour.web2rgb, ensure_string_list(color_pred)))\n self.reverse = reverse_coords\n self.rotate = rotate_image\n self.transparency = transparency\n self.scale = scale\n self.use_mask = use_mask\n self.viz_pose = {\n 'human_pose2d': functools.partial(self.__draw_human_pose2d, \n self.visualizer, marker=cv2.MARKER_DIAMOND, \n rotate=self.rotate, transparency=self.transparency,\n scale=self.scale\n ),\n }\n self.xforms = { #TODO: extract these into a common module\n 'ndc': lambda coord, img: torch.addcmul(\n torch.scalar_tensor(0.5).to(coord), coord, torch.scalar_tensor(0.5).to(coord)\n ) * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord),\n 'coord': lambda coord, img: coord,\n 'norm': lambda coord, img: coord * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord),\n }\n self.access = lambda td, k: toolz.get_in(k.split('.'), td)\n\n @property\n def name(self) -> str:\n return self.env_name\n \n def __call__(self, tensors: typing.Dict[str, torch.Tensor]) -> None:\n for img, poses, gt, pred, gt_masks, pred_masks, pose_struct, gt_c, pred_c, coord in zip(\n self.images, self.poses, self.gt, self.pred, self.gt_masks, self.pred_masks, [self.pose_structure, ],\n self.color_gt, self.color_pred, self.coords\n ):\n gt_coord = self.access(tensors, gt).detach()\n pred_coord = self.access(tensors, pred).detach()\n gt_masks = self.access(tensors, gt_masks).detach()\n pred_masks = self.access(tensors, pred_masks).detach()\n if self.reverse:\n gt_coord = gt_coord.flip(-1)\n pred_coord = pred_coord.flip(-1)\n image = self.access(tensors, img).detach()\n self.viz_pose[poses](\n image,\n self.xforms[coord](gt_coord, image),\n self.xforms[coord](pred_coord, image),\n gt_masks if self.use_mask else torch.ones_like(gt_masks),\n pred_masks if self.use_mask else torch.ones_like(pred_masks),\n pose_struct,\n np.uint8(np.array(list(gt_c)) * 255),\n 
np.uint8(np.array(list(pred_c)) * 255),\n # np.uint8(np.array(list(reversed(gt_c))) * 255),\n # np.uint8(np.array(list(reversed(pred_c))) * 255),\n coord, img, img, self.name\n ) \n \n @staticmethod\n def __draw_human_pose2d(\n visdom: visdom.Visdom,\n images: torch.Tensor,\n gt_coordinates: torch.Tensor,\n pred_coordinates: torch.Tensor,\n gt_masks: torch.Tensor,\n pred_masks: torch.Tensor,\n pose_structure: typing.List[typing.List[int]],\n gt_color: typing.List[float],\n pred_color: typing.List[float],\n coord: str,\n key: str,\n win: str,\n env: str,\n marker: int,\n rotate: bool,\n transparency: float,\n scale: float,\n ):\n b, _, h, w = images.shape\n imgs = np.zeros([b, 3, int(scale * h), int(scale * w)], dtype=np.uint8) if not rotate \\\n else np.zeros([b, 3, int(scale * w), int(scale * h)], dtype=np.uint8)\n gt_coords = gt_coordinates.cpu().int()\n pred_coords = pred_coordinates.cpu().int()\n gt_coords = torch.flip(gt_coords, dims=[-1])\n pred_coords = torch.flip(pred_coords, dims=[-1])\n gt_coords = gt_coords.numpy()\n pred_coords = pred_coords.numpy()\n diagonal = torch.norm(torch.Tensor([*imgs.shape[2:]]), p=2)\n marker_size = int(0.015 * diagonal) #TODO: extract percentage param to config?\n line_size = int(0.005 * diagonal) #TODO: extract percentage param to config?\n for i in range(imgs.shape[0]):\n img = images[i, ...].cpu().numpy().transpose(1, 2, 0) * 255.0\n img = img.copy().astype(np.uint8) if img.shape[2] > 1\\\n else cv2.cvtColor(img.copy().astype(np.uint8), cv2.COLOR_GRAY2RGB)\n bg = img.copy()\n for coords, color, masks in zip(\n [gt_coords, pred_coords],\n [gt_color, pred_color],\n [gt_masks, pred_masks]\n ): \n coord_i = coords[i, ...]\n for kpts_group in pose_structure:\n for (a, b) in pairwise(kpts_group):\n # for j in range(len(kpts_group) - 1): \n if torch.sum(masks[i, a]) and torch.sum(masks[i, b]):\n start_xy = tuple(coord_i[a])\n end_xy = tuple(coord_i[b])\n X = (start_xy[0], end_xy[0])\n Y = (start_xy[1], end_xy[1])\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((Y[0] - Y[1]) ** 2 + (X[0] - X[1]) ** 2) ** 0.5\n angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))\n stickwidth = line_size\n # polygon = cv2.ellipse2Poly(\n # (int(mX or 0), int(mY or 0)),\n # (int(length/2 or 1), int(stickwidth or 1)),\n # int(angle), 0, 360, 1\n # )\n # cv2.fillConvexPoly(bg, polygon, color.tolist())\n cv2.line(img, start_xy, end_xy, color.tolist(), thickness=line_size)\n for k, coord in enumerate(coord_i):\n if torch.sum(masks[i, k]):\n if marker < 0:\n cv2.circle(bg, \n tuple(coord), \n marker_size, color.tolist(), thickness=line_size\n )\n else:\n cv2.drawMarker(bg, \n tuple(coord),\n color.tolist(),\n marker, marker_size, line_size\n )\n\n img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)\n h, w = img.shape[:2]\n if scale != 1.0:\n img = np.array(Image.fromarray(img).resize(\n (int(w * scale), int(h * scale)), Image.ANTIALIAS\n ))\n imgs[i, ...] = img.transpose(2, 0, 1) if not rotate\\\n else cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE).transpose(2, 0, 1)\n visdom.images(\n imgs,\n # np.flip(imgs, axis=1),\n win=win,\n env=env,\n opts={\n 'title': key,\n 'caption': key,\n 'jpgquality': 50,\n }\n )\n" ]
[ [ "torch.Tensor", "torch.scalar_tensor", "torch.sum", "numpy.mean", "torch.flip", "torch.ones_like" ] ]
flaneuse/cvisb-antibody-analysis
[ "0780d7c6592cdbe5fd1faf141a8a2ca06a7bfa96" ]
[ "src/calculations/adcd.py" ]
[ "# @name: adcd.py\n# @summary: Calculations for ADCD experiment\n# @description: Imports fluorescence data from flow cytometry antibody-dependent complement detection\n# @sources:\n# @depends: pandas, numpy, scipy\n# @author: Laura Hughes\n# @email: [email protected]\n# @license: Apache-2.0\n# @date: 17 April 2018\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom scipy.stats import percentileofscore\n# os.chdir('/Users/laurahughes/GitHub/cvisb-antibody-analysis/src/calculations')\nfrom calculations.SysSerologyExpt import SysSerologyExpt\n\n\n# [Calculate ratios + average values] -------------------------------------------------------------------\n# NOTE: this is where you adjust the calculations\nclass ADCD(SysSerologyExpt):\n # !!!! [1/3] !!!! Adjust the columns within the Excel sheet\n # define which columns used in the calculations\n # format: {'column name in FlowJo spreadsheet': 'what to rename it to'}\n fluor_cols = {\n 'beads/PerCP-Cy5-5-A, SSC-A subset | Geometric Mean (FITC-A)': 'MFI_all',\n 'beads/PerCP-Cy5-5-A, SSC-A subset/FITC-A subset | Freq. of Parent': 'pct_fluor',\n 'beads/PerCP-Cy5-5-A, SSC-A subset/FITC-A subset | Geometric Mean (FITC-A)': 'MFI'\n }\n\n def calc_score(self):\n # !!!! [2/3] CALCULATION DEFINITION !!!! Change as needed\n # Creates a new column called 'fluor_score' which calculates the fluorescence score\n self.df['fluor_score'] = self.df.MFI * self.df.pct_fluor / self.scale_factor\n self.df['fluor_score_type'] = 'phagocytotic score'\n\n self.run_qc()\n\n return self.df\n\n# !!!! [3/3] Change QC !!!!\n def run_qc(self):\n \"\"\"\n Function used to run some basic quality control on the fluorescence scores\n \"\"\"\n self.df['fluor_percentile'] = self.df.fluor_score.apply(lambda x: percentileofscore(self.df.fluor_score, x))\n\n def __init__(self, fluorfile, platefile, expt_dict):\n super().__init__(fluorfile, platefile, expt_dict)\n\n\n\n# np.trapz(np.array([22.7493,\t17.768735,\t13.9848]), np.array([150, 750, 3750])) / np.trapz(np.array([86.765295,\t94.033755,\t25.95186]), np.array([150, 750, 3750]))\n#\n# np.trapz(np.array([13.9848, 17.768735,\t22.7493]), np.log10([1/3750, 1/750, 1/150])) / np.trapz(np.array([25.95186, 94.033755,\t86.765295]), np.log10([1/3750, 1/750, 1/150]))\n#\n# np.trapz(np.array([13.9848, 17.768735,\t22.7493]), np.array([1/3750, 1/750, 1/150])) / np.trapz(np.array([25.95186, 94.033755,\t86.765295]), np.array([1/3750, 1/750, 1/150]))\n" ]
[ [ "scipy.stats.percentileofscore" ] ]
KBeno/firefly-lca
[ "a081b05f5d66951792bd00d2bb6ae1f8e43235e0" ]
[ "firepy/calculation/energy.py" ]
[ "import shutil\nimport subprocess\nimport uuid\nfrom json import JSONDecodeError\nfrom typing import List, Union, Tuple\nimport requests\nimport json\nimport logging\nimport math\nfrom pathlib import Path\n\nfrom eppy.modeleditor import IDF\nimport esoreader\nimport pandas as pd\nimport numpy as np\nfrom pandas import Series\n\nfrom firepy.model import HVAC, Heating, Cooling, NaturalVentilation\nfrom firepy.model.building import Construction, OpaqueMaterial, WindowMaterial, ObjectLibrary, BuildingSurface, \\\n Building, Zone, Ref\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemoteConnection:\n\n def __init__(self, host: str, port: int):\n self.host = host\n self.port = port\n if not host.startswith('http'):\n self.host = 'http://' + self.host\n self.url = '{host}:{port}'.format(host=self.host, port=self.port)\n\n def setup(self, name: str = None, epw: str = None, idd: str = None, variables: dict = None):\n \"\"\"\n\n :param name: name of the calculation setup\n :param epw: full epw string\n :param idd: full idd string\n :param variables: dict of variables\n :return:\n \"\"\"\n url = self.url + '/setup'\n\n if epw is not None:\n if name is None:\n raise Exception('Please provide a name for the setup')\n logger.debug('Setting up EPW on server')\n requests.post(url=url, params={'name': name, 'type': 'epw'}, data=epw)\n\n if idd is not None:\n logger.debug('Setting up IDD on server')\n requests.post(url=url, params={'type': 'idd'}, data=idd)\n\n if variables is not None:\n logger.debug('Setting up variables dict')\n requests.post(url=url, params={'type': 'vars'}, json=variables)\n\n def check(self, name) -> bool:\n url = self.url + '/check'\n\n response = requests.get(url=url, params={'name': name})\n\n if response.text == \"OK\":\n logger.debug('Server check response: {}'.format(response.text))\n return True\n else:\n logger.debug('Server check response: {}'.format(response.text))\n return False\n\n def run(self, name: str, idf: IDF, sim_id: str = None) -> str:\n\n url = self.url + '/run'\n logger.debug('Running simulation at: {}'.format(url))\n data = idf.idfstr()\n params = {'name': name}\n if sim_id is not None:\n params['id'] = sim_id\n response = requests.post(url=url, params=params, data=data)\n return response.text\n\n def results(self, variables: List[str], name: str, sim_id: str, typ: str, period: str):\n url = self.url + '/results'\n logger.debug('Requesting results from: {}'.format(url))\n payload = {'variables': variables, 'name': name, 'id': sim_id, 'type': typ, 'period': period}\n response = requests.get(url=url, params=payload)\n logger.debug('Response from server: {}'.format(response.text))\n return response\n\n def results_detailed(self, variable: str, name: str, sim_id: str, typ: str, period: str):\n url = self.url + '/results/detailed'\n logger.debug('Requesting detailed results from: {}'.format(url))\n payload = {'variable': variable, 'name': name, 'id': sim_id, 'type': typ, 'period': period}\n response = requests.get(url=url, params=payload)\n return response.json()\n\n def clean_up(self, name: str) -> str:\n url = self.url + '/cleanup'\n logger.debug('Cleaning up server')\n response = requests.get(url=url, params={'name': name})\n return response.text\n\n def drop_result(self, name: str, sim_id: str) -> str:\n url = self.url + '/cleanup/result'\n logger.debug('Deleting result on server for id: {id}'.format(id=sim_id))\n response = requests.get(url=url, params={'name': name, 'id': sim_id})\n return response.text\n\n\nclass EnergyPlusSimulation:\n\n # TODO separate 
remote and local class\n\n var_dict = {\n 'zone': {\n 'heating': 'Zone Ideal Loads Supply Air Total Heating Energy',\n 'cooling': 'Zone Ideal Loads Supply Air Total Cooling Energy',\n 'infiltration': 'Zone Infiltration Total Heat Loss Energy',\n 'solar gains': 'Zone Windows Total Transmitted Solar Radiation Energy',\n 'glazing loss': 'Zone Windows Total Heat Loss Energy',\n 'opaque loss': 'Zone Opaque Surface Outside Face Conduction Loss Energy',\n 'ventilation': 'Zone Ventilation Sensible Heat Loss Energy',\n 'lights': 'Zone Lights Electric Energy',\n 'equipment': 'Zone Electric Equipment Electric Energy',\n 'other': 'Zone Other Equipment Total Heating Energy',\n 'people': 'Zone People Total Heating Energy'\n },\n 'surface': {\n 'opaque loss': 'Surface Average Face Conduction Heat Transfer Energy',\n 'glazing loss': 'Surface Window Heat Loss Energy',\n 'glazing gain': 'Surface Window Heat Gain Energy',\n 'conduction rate': 'Surface Average Face Conduction Heat Transfer Rate per Area'\n },\n 'balance': {\n 'internal gain': 'Zone Air Heat Balance Internal Convective Heat Gain Rate',\n 'convective': 'Zone Air Heat Balance Surface Convection Rate',\n 'interzone air': 'Zone Air Heat Balance Interzone Air Transfer Rate',\n 'outdoor air': 'Zone Air Heat Balance Outdoor Air Transfer Rate',\n 'system air': 'Zone Air Heat Balance System Air Transfer Rate',\n 'system convective': 'Zone Air Heat Balance System Convective Heat Gain Rate',\n 'air storage': 'Zone Air Heat Balance Air Energy Storage Rate',\n 'deviation': 'Zone Air Heat Balance Deviation Rate'\n }\n }\n\n units = {\n 'heating': 'J',\n 'cooling': '-J',\n 'infiltration': '-J',\n 'solar gains': 'J',\n 'glazing loss': '-J',\n 'opaque loss': '-J',\n 'ventilation': '-J',\n 'lights': 'J',\n 'equipment': 'J',\n 'people': 'J',\n 'other': 'J',\n\n 'glazing gain': 'J',\n 'conduction rate': 'W/m2',\n\n 'internal gain': 'W',\n 'convective': 'W',\n 'interzone air': 'W',\n 'outdoor ait': 'W',\n 'system air': 'W',\n 'system convective': 'W',\n 'air storage': 'W',\n 'deviation': 'W',\n }\n\n def __init__(self, idf: IDF = None, epw_path: str = None, epw: str = None, output_freq: str = 'monthly',\n typ: str = 'local', output_directory: str = None, remote_server: RemoteConnection = None,\n ep_exe_path = None):\n \"\"\"\n A class to run EnergyPlus simulations either locally or remotely on a server\n\n :param idf: eppy IDF instance to hold model information\n :param epw_path: path to the weather file\n :param epw: full epw string from the weather file\n :param output_freq: output will be saved at this frequency and any lower frequency\n (e.g. 
monthly, annual, runperiod)\n :param typ: 'local' or 'remote'; either output_directory need to be set (for local), or server (for remote)\n :param output_directory: a directory path to save EnergyPlus output to\n :param remote_server: a RemoteConnection instance that can connect to the EnergyPlus server\n \"\"\"\n\n self.idf = idf\n if self.idf is not None:\n self.idf.epw = epw_path\n\n self.typ = typ\n if epw is None:\n if epw_path is not None:\n with open(epw_path, 'r') as epw_file: # 'rb' for binary open?\n self.epw = epw_file.read()\n else:\n self.epw = None\n else:\n self.epw = epw\n\n if typ == 'local':\n self.output_directory = output_directory\n if typ == 'remote':\n self.server = remote_server\n\n self.output_frequency = output_freq\n\n self.ep_exe_path = ep_exe_path\n\n @property\n def output_frequency(self):\n return self._output_frequency\n\n @output_frequency.setter\n def output_frequency(self, output_freq: str):\n self._output_frequency = []\n freq_list = ['runperiod', 'annual', 'monthly', 'daily', 'hourly', 'timestep']\n if output_freq not in freq_list:\n raise Exception('Parameter \"output_freq\" can be one of: {i}'.format(i=', '.join(freq_list)))\n freq_index = freq_list.index(output_freq)\n self._output_frequency = freq_list[:freq_index+1]\n\n def run(self, **kwargs) -> str:\n if self.idf is None:\n raise Exception('No idf set, unable to run simulation')\n if self.typ == 'local':\n local_response = self.run_local(**kwargs)\n return local_response\n elif self.typ == 'remote':\n server_response = self.run_remote(**kwargs)\n return server_response\n\n def run_local(self, name: str, sim_id: str = None) -> str:\n if sim_id is None:\n sim_id = str(uuid.uuid1())\n output_path = Path(f'{self.output_directory}_{name}') / sim_id\n if not output_path.exists():\n output_path.mkdir(parents=True)\n\n # self.idf.run(output_directory=str(output_path))\n\n idf_address = output_path / \"model.idf\"\n\n with idf_address.open('w') as idf_file:\n idf_file.write(self.idf.idfstr())\n\n # compose Energy Plus command\n cmd = [self.ep_exe_path]\n cmd += [\"--output-directory\", str(output_path)] # output folder\n cmd += [\"--weather\", self.idf.epw] # weather file\n cmd += [\"--idd\", self.idf.iddname] # input data dictionary\n cmd += [str(idf_address)] # idf input file\n\n subprocess.run(cmd)\n\n return sim_id\n\n def run_remote(self, name: str, force_setup: bool = False, sim_id: str = None) -> str:\n # check first\n if not self.server.check(name) or force_setup:\n self.setup_server(name=name)\n # than tun\n if sim_id is not None:\n server_response = self.server.run(name=name, idf=self.idf, sim_id=sim_id)\n else:\n server_response = self.server.run(name=name, idf=self.idf)\n return server_response\n\n def setup_server(self, name: str, epw: str = None):\n variables = {'var_dict': EnergyPlusSimulation.var_dict, 'units': EnergyPlusSimulation.units}\n if self.epw is None and epw is None:\n raise Exception('No epw is set, please provide epw before setting up the server')\n if epw is not None:\n self.epw = epw\n self.server.setup(name=name, epw=self.epw, variables=variables)\n # optionally we could set the idd\n\n def clear_outputs(self):\n # clear all output variables\n while len(self.idf.idfobjects['Output:Variable']) > 0:\n self.idf.popidfobject('Output:Variable', 0)\n\n def set_outputs(self, *args, typ: str = None):\n \"\"\"\n options:\n ZONE ENERGY (from Honeybee)\n Zone Ideal Loads Supply Air {type} {energy} Cooling Energy\n for type in [Total, Sensible, Latent]\n for energy in [Heating, 
Cooling]\n Cooling Coil Electric Energy\n Chiller Electric Energy\n Boiler Heating Energy\n Heating Coil Total Heating Energy\n Heating Coil Gas Energy\n Heating Coil Electric Energy\n Humidifier Electric Energy\n Fan Electric Energy\n Zone Ventilation Fan Electric Energy\n Zone Lights Electric Energy\n Zone Electric Equipment Electric Energy\n Earth Tube Fan Electric Energy\n Pump Electric Energy\n Zone VRF Air Terminal Cooling Electric Energy\n Zone VRF Air Terminal Heating Electric Energy\n VRF Heat Pump Cooling Electric Energy\n VRF Heat Pump Heating Electric Energy\n\n ZONE GAINS AND LOSSES (from Honeybee)\n Zone Windows Total Transmitted Solar Radiation Energy\n Zone Ventilation Sensible Heat Loss Energy\n Zone Ventilation Sensible Heat Gain Energy\n\n Zone People {type} Heating Energy\n Zone Ideal Loads Zone {type} Heating Energy\n Zone Ideal Loads Zone {type} Cooling Energy\n Zone Infiltration {type} Heat Loss Energy\n Zone Infiltration {type} Heat Gain Energy\n for type in [Total, Sensible, Latent]\n\n ZONE COMFORT (from Honeybee)\n Zone Operative Temperature\n Zone Mean Air Temperature\n Zone Mean Radiant Temperature\n Zone Air Relative Humidity\n\n COMFORT MAP (from Honeybee)\n Zone Ventilation Standard Density Volume Flow Rate\n Zone Infiltration Standard Density Volume Flow Rate\n Zone Mechanical Ventilation Standard Density Volume Flow Rate\n Zone Air Heat Balance Internal Convective Heat Gain Rate\n Zone Air Heat Balance Surface Convection Rate\n Zone Air Heat Balance System Air Transfer Rate\n Surface Window System Solar Transmittance\n\n HVAC METRICS (from Honeybee)\n System Node Standard Density Volume Flow Rate\n System Node Temperature\n System Node Relative Humidity\n Zone Cooling Setpoint Not Met Time\n Zone Heating Setpoint Not Met Time\n\n SURFACE TEMPERATURE (from Honeybee)\n Surface Outside Face Temperature\n Surface Inside Face Temperature\n\n SURFACE ENERGY (from Honeybee)\n Surface Average Face Conduction Heat Transfer Energy\n Surface Window Heat Loss Energy\n Surface Window Heat Gain Energy\n\n GLAZING SOLAR (from Honeybee)\n Surface Window Transmitted Beam Solar Radiation Energy\n Surface Window Transmitted Diffuse Solar Radiation Energy\n Surface Window Transmitted Solar Radiation Energy\n\n :param args: 'heating' / 'cooling' / etc.\n :param typ: 'zone' / 'surface'\n :return: None\n \"\"\"\n if 'all' in args:\n if typ is not None:\n for var in EnergyPlusSimulation.var_dict[typ.lower()].values():\n self.add_variable(var)\n else:\n for typ in EnergyPlusSimulation.var_dict.keys():\n for var in EnergyPlusSimulation.var_dict[typ].values():\n self.add_variable(var)\n else:\n if typ is not None:\n for var in args:\n self.add_variable(EnergyPlusSimulation.var_dict[typ.lower()][var])\n else:\n raise Exception('Please specify output type: {t}'.format(\n t=' or '.join(EnergyPlusSimulation.var_dict.keys())))\n\n def add_variable(self, var_name: str):\n variable_names = [ov.Variable_Name for ov in self.idf.idfobjects['Output:Variable']]\n if var_name not in variable_names:\n for output_freq in self.output_frequency:\n self.idf.newidfobject(\n key='Output:Variable',\n Key_Value='*',\n Variable_Name=var_name,\n Reporting_Frequency=output_freq\n )\n else:\n output_frequencies = [output_var.Reporting_Frequency\n for output_var in self.idf.idfobjects['Output:Variable']\n if output_var.Variable_Name == var_name]\n for output_freq in self.output_frequency:\n if output_freq not in output_frequencies:\n self.idf.newidfobject(\n key='Output:Variable',\n Key_Value='*',\n 
Variable_Name=var_name,\n Reporting_Frequency=output_freq\n )\n\n def results(self, variables: Union[str, List[str]], name: str = None, sim_id: str = None,\n typ: str = 'zone', period: str = 'monthly'):\n if self.typ == 'local':\n if name is None:\n raise Exception('Please provide \"name\" to access local results')\n if sim_id is None:\n raise Exception('Please provide simulation id to access local results')\n return self.results_local(variables=variables, name=name, sim_id=sim_id, typ=typ, period=period)\n elif self.typ == 'remote':\n if name is None:\n raise Exception('Please provide \"name\" to access remote results')\n if sim_id is None:\n raise Exception('Please provide simulation id to access remote results')\n return self.results_remote(variables=variables, name=name, sim_id=sim_id, typ=typ, period=period)\n\n def results_local(self, variables: Union[str, List[str]], name: str, sim_id: str,\n typ: str = 'zone', period: str = 'monthly'):\n if variables == 'all':\n variables = EnergyPlusSimulation.var_dict[typ.lower()].keys()\n\n elif isinstance(variables, str):\n variables = [variables]\n\n simulation_address = Path(f'{self.output_directory}_{name}') / sim_id\n\n if not simulation_address.exists():\n message = 'No result directory for id: {i}'.format(i=sim_id)\n logger.debug(message)\n return message\n\n end_path = simulation_address / 'eplusout.end'\n with end_path.open('r') as end_file:\n end_success = end_file.readline()\n logger.debug(end_success)\n if 'EnergyPlus Completed Successfully' not in end_success:\n message = 'Simulation failed for id: {i}'.format(i=sim_id)\n logger.info(message)\n return message\n\n eso_path = simulation_address / 'eplusout.eso'\n if not eso_path.exists():\n message = 'No result for id: {i}'.format(i=sim_id)\n logger.debug(message)\n return message\n\n eso = esoreader.read_from_path(str(eso_path))\n res_dfs = []\n for var in variables:\n var_name = EnergyPlusSimulation.var_dict[typ][var]\n df = eso.to_frame(var_name, frequency=period)\n df = df.sum(axis='columns')\n df.name = var\n if EnergyPlusSimulation.units[var] == 'J': # Convert to kWh\n df /= (3.6*1e6)\n elif EnergyPlusSimulation.units[var] == '-J':\n df /= -(3.6 * 1e6)\n res_dfs.append(df)\n\n return pd.concat(res_dfs, axis='columns')\n\n def results_remote(self, variables: Union[str, List[str]], name: str, sim_id: str,\n typ: str = 'zone', period: str = 'monthly') -> pd.DataFrame:\n if variables == 'all':\n variables = EnergyPlusSimulation.var_dict[typ.lower()].keys()\n\n elif isinstance(variables, str):\n variables = [variables]\n\n response = self.server.results(variables, name, sim_id, typ, period)\n\n try:\n return pd.read_json(response.json(), orient='split')\n except JSONDecodeError:\n return response.text\n\n def results_detailed(self, variable: str, name: str = None, sim_id: str = None,\n typ: str = 'zone', period: str = 'monthly'):\n if self.typ == 'local':\n if name is None:\n raise Exception('Please provide \"name\" to access local results')\n if sim_id is None:\n raise Exception('Please provide \"simulation id\" to access local results')\n return self.results_detailed_local(variable=variable, name=name, sim_id=sim_id,\n typ=typ, period=period)\n elif self.typ == 'remote':\n if name is None:\n raise Exception('Please provide \"name\" to access remote results')\n if sim_id is None:\n raise Exception('Please provide \"simulation id\" to access remote results')\n return self.results_detailed_remote(variable=variable, name=name, sim_id=sim_id,\n typ=typ, period=period)\n\n def 
results_detailed_local(self, variable: str, name: str, sim_id: str, typ: str, period: str):\n\n simulation_address = Path(f'{self.output_directory}_{name}') / sim_id\n eso_path = simulation_address / 'eplusout.eso'\n eso = esoreader.read_from_path(str(eso_path))\n\n var_name = EnergyPlusSimulation.var_dict[typ][variable]\n df = eso.to_frame(var_name, frequency=period)\n if EnergyPlusSimulation.units[variable] == 'J': # Convert to kWh\n df /= (3.6 * 1e6)\n elif EnergyPlusSimulation.units[variable] == '-J':\n df /= -(3.6 * 1e6)\n\n return df\n\n def results_detailed_remote(self, variable: str, name: str, sim_id: str,\n typ: str, period: str):\n\n response_json = self.server.results_detailed(variable=variable, name=name, sim_id=sim_id,\n typ=typ, period=period)\n return pd.read_json(response_json, orient='split')\n\n def drop_local_result(self, name: str, sim_id: str):\n simulation_address = Path(f'{self.output_directory}_{name}') / sim_id\n if simulation_address.exists():\n shutil.rmtree(simulation_address)\n\n\nclass SteadyStateCalculation:\n\n month_lengths = [\n 744,\n 672,\n 744,\n 720,\n 744,\n 720,\n 744,\n 744,\n 720,\n 744,\n 720,\n 744\n ] # in hours\n\n year_length = 8760\n\n def __init__(self, weather_data: Union[pd.DataFrame, Path, str] = None):\n self.weather_data = weather_data\n\n @property\n def weather_data(self) -> pd.DataFrame:\n return self._weather_data\n\n @weather_data.setter\n def weather_data(self, data):\n if isinstance(data, pd.DataFrame):\n self._weather_data = data\n elif isinstance(data, (Path, str)):\n if isinstance(data, str):\n data = Path(data)\n self._weather_data = pd.read_csv(str(data), header=[0,1], index_col=[0,1])\n elif data is None:\n self._weather_data = None\n else:\n raise Exception('Only Path, str or pandas DataFrame can be parsed to weather data')\n\n @staticmethod\n def generate_weather_data(epw: Path = None) -> pd.DataFrame:\n \"\"\"\n Generate weather data as pandas DataFrame if epw is supplied, weather data will be calculated\n from the epw, if not, a blank DataFrame will be created to be filled by the user\n :param epw: the weather file in .epw format\n :return: pandas DataFrame\n \"\"\"\n index_labels = [('Monthly', '{m:02n}'.format(m=m)) for m in range(1, 13)] + [('Yearly', 'Yearly')]\n\n column_labels = [('External Temperature', 'Mean')]\n orientation_list = ['North', 'NorthEast', 'East', 'SouthEast', 'South', 'SouthWest', 'West', 'NorthWest']\n column_labels += [('Total Solar Radiation Energy', orientation) for orientation in orientation_list]\n\n cols = pd.MultiIndex.from_tuples(column_labels)\n indx = pd.MultiIndex.from_tuples(index_labels)\n weather_data = pd.DataFrame(columns=cols, index=indx)\n if epw is None:\n return weather_data\n else:\n raise Exception('epw data generation is not implemented yet')\n\n def u_value(self, construction: Ref, library: ObjectLibrary, surface_type=\"WALL\"):\n \"\"\"\n works with opaque constructions and simple glazing system\n TODO inhomogenity in construction\n TODO effect of screws, fixing elements\n TODO air layers\n \"\"\"\n\n surface_heat_resistance = {\n # (R_si, R_se)\n 'ROOF': (0.1, 0.04),\n 'CEILING': (0.1, 0.04),\n 'EXPOSEDFLOOR': (0.17, 0.04),\n 'FLOOR': (0.17, 0.04),\n 'SLABONGRADE': (0.17, 0.04),\n 'WALL': (0.13, 0.04)\n # to be continued...\n }\n try:\n rs_i, rs_e = surface_heat_resistance[surface_type.upper()]\n except KeyError:\n raise Exception('No heat transfer coefficient defined for surface of type: {st}'.format(st=surface_type))\n\n r_value = rs_i + rs_e\n\n u_value_win = 0\n\n 
construction_obj = library.get(construction)\n for mat in construction_obj.Layers:\n material = library.get(mat)\n if isinstance(material, OpaqueMaterial):\n r_value += material.Thickness / material.Conductivity\n elif isinstance(material, WindowMaterial):\n u_value_win = material.UValue\n else:\n message = \"Layer in construction needs to be either OpaqueMaterial or WindowMaterial: \"\n message += \"{material} - {t}\".format(material=material.RefName, t=material.ObjType)\n raise Exception(message)\n if u_value_win != 0:\n u_value = u_value_win\n else:\n u_value = 1 / r_value\n return u_value\n\n def u_value_floor_to_ground(self, surface: BuildingSurface, wall_thickness: float, library: ObjectLibrary,\n soil_type='sand') -> float:\n \"\"\"\n Calculate U value of a floor-to-ground based on ISO 13370 Standard\n :param surface:\n :param wall_thickness:\n :param library:\n :param soil_type:\n :return:\n \"\"\"\n # TODO underground wall!\n\n if surface.SurfaceType.lower() not in ['slabongrade', 'floor']:\n raise Exception(\n \"U value calculation of Floor to Ground not suitable for {st}!\".format(st=surface.SurfaceType))\n\n soil_conductivity = {\n # W/mK\n 'clay': 1.5, # agyag\n 'slit': 1.5, # iszap\n 'sand': 2.0, # homok\n 'gravel': 2.0, # kavics\n 'stone': 3.5\n }\n\n soil_heat_store_capacity = {\n # J/m3K\n 'clay': 3e6,\n 'slit': 3e6,\n 'sand': 2e6,\n 'gravel': 2e6,\n 'stone': 2e6\n }\n\n def heat_resistance(construction):\n\n r_value = 0\n for mat in construction.Layers:\n material = library.get(mat)\n if isinstance(material, OpaqueMaterial):\n r_value += material.Thickness / material.Conductivity\n else:\n raise Exception('Cannot calculate R value for material: {}'.format(material))\n return r_value\n\n # TODO perimeter of Building, not one surface!!\n # TODO distinguish between heated and non-heated in perimeter!\n\n # characteristic size:\n B = surface.area() / (0.5 * surface.perimeter())\n\n # Resistance values\n R_si, R_se = 0.17, 0.04\n R_f = heat_resistance(library.get(surface.Construction))\n\n Lambda = soil_conductivity[soil_type]\n\n # equivalent thickness:\n w = wall_thickness\n d_t = w + Lambda * (R_f + R_si)\n\n if d_t >= B: # equivalent_thickness >= characteristic_size\n u_value = Lambda / (0.457 * B + d_t)\n else: # equivalent_thickness < characteristic_size\n u_value = 2 * Lambda / (math.pi * B + d_t) * math.log(math.pi * B / d_t + 1)\n\n return u_value\n\n def g_value(self, construction: Construction, library: ObjectLibrary):\n \"\"\"\n works only with simple glazing system\n \"\"\"\n g_value_win = 0\n for mat in construction.Layers:\n material = library.get(mat)\n if isinstance(material, WindowMaterial):\n g_value_win = material.gValue\n else:\n raise Exception('Cannot calculate g_value for material: {m}'.format(m=material))\n return g_value_win\n\n def heat_store_capacity(self, obj: Union[Construction, Zone, Building], library: ObjectLibrary):\n \"\"\"\n Calculate heat store capacity of Construction / Zone / Building\n For constructions layers from inside are considered until they reach any of the following condition:\n - we reach the first insulation layer\n - we reach 10 cm into the construction\n - we reach the the 1/2 of the construction thickness\n :param obj: Construction, Zone or Building\n :param library: Object library that holds the data of the Materials\n :return: kappa value in [J/m^2*K] for Construction and in [J/K] for Zone\n \"\"\"\n\n if isinstance(obj, Construction):\n d = 0 # [m] position in the construction from inside\n kappa = 0\n # layers from 
inside to outside\n for layer in obj.Layers[::-1]:\n material = library.get(layer)\n if isinstance(material, OpaqueMaterial):\n if material.Conductivity < 0.1: # insulation material\n break\n elif d + material.Thickness >= min(obj.thickness(library) / 2, 0.1):\n # we reached the 1/2 of the construction thickness or 10 cm\n t = min(obj.thickness(library) / 2, 0.1) - d\n kappa += material.Density * t * material.SpecificHeat\n d += t\n break\n else:\n kappa += material.Density * material.Thickness * material.SpecificHeat\n d += material.Thickness\n else:\n raise Exception('Heat store capacity cannot be calculated for: {m}'.format(m=material))\n return kappa # [J/m^2*K]\n\n elif isinstance(obj, Zone):\n capacity = 0\n for surface in obj.BuildingSurfaces:\n if surface.SurfaceType.upper() in [\"WALL\", \"ROOF\", \"CEILING\", \"FLOOR\", \"SLABONGRADE\"]:\n construction = library.get(surface.Construction)\n capacity += surface.area_net() * self.heat_store_capacity(construction, library)\n for internal in obj.InternalMasses:\n construction = library.get(internal.Construction)\n capacity += 2 * internal.Area * self.heat_store_capacity(construction, library)\n # we take this 2 times, because both sides of the internal structures are exposed to this zone\n return capacity # [J/K]\n\n elif isinstance(obj, Building):\n capacity = 0\n for zone in obj.Zones:\n capacity += self.heat_store_capacity(zone, library)\n return capacity # [J/K]\n\n else:\n raise Exception('Type of parameter \"obj\" needs to be one of: Construction, Zone, Building')\n\n def sum_AU_envelope(self, zone: Zone, library: ObjectLibrary):\n \"\"\"\n Calculate Summa A*U for the envelope surfaces in [W/K]\n TODO simplified correction factor for heatbridges\n \"\"\"\n\n sum__au = 0\n\n for surface in zone.BuildingSurfaces:\n\n if surface.OutsideBoundaryCondition.lower() == \"outdoors\":\n\n u_value = self.u_value(construction=surface.Construction, library=library,\n surface_type=surface.SurfaceType)\n sum__au += u_value * surface.area_net()\n for window in surface.Fenestration:\n u_value = self.u_value(construction=window.Construction, library=library,\n surface_type=surface.SurfaceType)\n sum__au += u_value * window.area()\n\n return sum__au # [W/K]\n\n def sum_AU_ground(self, zone: Zone, library: ObjectLibrary):\n \"\"\"\n Calculate Summa A*U for ground contact surfaces in [W/K]\n \"\"\"\n\n # get average wall thickness of zone for floor U value calculation\n thickness_list = [library.get(surface.Construction).thickness(library) for surface in zone.BuildingSurfaces if\n surface.SurfaceType.lower() == 'wall']\n wall_thickness = sum(thickness_list) / len(thickness_list)\n\n sum__au = 0\n\n for surface in zone.BuildingSurfaces:\n\n if surface.OutsideBoundaryCondition.lower() in [\"ground\", \"othersideconditionsmodel\"]:\n # OtherSideConditionsModel in case of Ground Domain\n\n u_value = self.u_value_floor_to_ground(surface, wall_thickness, library) # define to soil type?\n sum__au += u_value * surface.area()\n\n return sum__au # [W/K]\n\n def sum_lpsi_ground(self, zone: Zone):\n \"\"\"\n Calculate the heat loss through floor-to-ground perimeter heat bridge in [W/K]\n TODO not implemented yet\n :param zone:\n :return:\n \"\"\"\n return None\n\n def sum_lpsi_envelope(self, zone: Zone):\n \"\"\"\n Calculate the heat loss through envelope heat bridges in [W/K]\n TODO not implemented yet\n :param zone:\n :return:\n \"\"\"\n return None\n\n def heat_transmission_direct(self, zone: Zone, library: ObjectLibrary):\n \"\"\"\n Calculate total direct 
heat transmission through envelope surfaces (H_tr_D) in [W/K]\n TODO only sum A*U is calculated, sum L*psi and point heat bridges are neglected\n :param zone:\n :param library\n :return:\n \"\"\"\n h_tr_d = self.sum_AU_envelope(zone=zone, library=library)\n\n return h_tr_d # [W/K]\n\n def heat_transmission_ground(self, zone: Zone, library: ObjectLibrary):\n \"\"\"\n Calculate total heat transmission through ground contact surfaces (H_tr_T) in [W/K]\n TODO only sum A*U is calculated, sum L*psi and point heat bridges are neglected\n :param zone:\n :param library\n :return:\n \"\"\"\n h_tr_t = self.sum_AU_ground(zone=zone, library=library)\n\n return h_tr_t # [W/K]\n\n def heat_energy_transmission(self, zone: Zone, hvac: HVAC, library: ObjectLibrary, heating=True) -> pd.Series:\n \"\"\"\n Calculate the heat transmission (gain or loss) of a zone (Q_tr) in [kWh]\n :param zone:\n :param hvac:\n :param library:\n :return: Heat transmission for each month of the year as pandas Series\n \"\"\"\n h_tr_d = self.heat_transmission_direct(zone, library)\n h_tr_t = self.heat_transmission_ground(zone, library)\n if heating:\n theta_i = hvac.Heating.set_point # heating setpoint temperature\n else: # cooling\n theta_i = hvac.Cooling.set_point # cooling setpoint temperature\n # monthly mean temperature (pd.Series)\n theta_e_monthly = self.weather_data.loc['Monthly', ('External Temperature', 'Mean')]\n # yearly mean temperature (float)\n theta_e_year = self.weather_data.loc[('Yearly', 'Yearly'), ('External Temperature', 'Mean')]\n # length of months (pd.Series)\n delta_t = pd.Series(data=SteadyStateCalculation.month_lengths, index=[str(i) for i in range(1, 13)])\n\n q_tr = ((h_tr_d)*(theta_i - theta_e_monthly) + h_tr_t * (theta_i - theta_e_year)) * delta_t / 1000\n\n return q_tr # [kWh]\n\n def heat_natural_ventilation(self, zone: Zone, hvac: HVAC):\n\n # get natural ventilation ACH\n n_req = hvac.required_ach\n n_fil = hvac.infiltration_ach\n\n if n_req > n_fil:\n n_nat = max(n_req - n_fil, 0.2)\n else:\n n_nat = 0.2\n\n # any other constant natural ventilation\n n_nat += hvac.NaturalVentilation.ach\n\n h_nat_vent = 0.35 * (n_nat + n_fil) * zone.volume() # [W/K]\n\n return h_nat_vent\n\n def heat_natural_ventilation_summer_night(self, zone: Zone, hvac: HVAC):\n # calculate natural ventilation extra ACH for summer nights\n b_night = 1.5 # TODO calculate from weather data\n duration = hvac.NaturalVentilation.summer_night_duration\n n_night = hvac.NaturalVentilation.summer_night_ach\n h_nat_vent_sum_night = 0.35 * b_night * duration / 24 * n_night * zone.volume() # [W/K]\n\n return h_nat_vent_sum_night\n\n def heat_ventilation(self, zone: Zone, hvac: HVAC, heating=True) -> pd.Series:\n \"\"\"\n H_szell in [W/K]\n :param zone:\n :param hvac:\n :param heating:\n :return:\n \"\"\"\n h_nat_vent = self.heat_natural_ventilation(zone, hvac)\n h_nat_vent_summer = self.heat_natural_ventilation_summer_night(zone, hvac)\n\n # monthly mean temperature (pd.Series)\n theta_e_monthly = self.weather_data.loc['Monthly', ('External Temperature', 'Mean')]\n\n if heating:\n h_vent = pd.Series(data=h_nat_vent, index=[str(i) for i in range(1, 13)])\n else: # Cooling\n # set night ventilation for cooling season months\n h_nat_vent_summer = pd.Series(data=h_nat_vent_summer, index=[str(i) for i in range(1, 13)])\n h_nat_vent_summer.loc[['1', '2', '3', '4', '10', '11', '12']] = 0 # Cooling season only: May-Sept\n h_vent = h_nat_vent + h_nat_vent_summer\n\n return h_vent # [W/K]\n\n def heat_energy_ventilation(self, zone: Zone, hvac: 
HVAC, heating=True) -> pd.Series:\n \"\"\"\n Q_szell in [kWh]\n :param zone:\n :param hvac:\n :param heating:\n :return:\n \"\"\"\n h_vent = self.heat_ventilation(zone, hvac, heating=heating)\n\n if heating:\n theta_i = hvac.Heating.set_point # heating setpoint temperature\n else: # cooling\n theta_i = hvac.Cooling.set_point # cooling setpoint temperature\n\n # monthly mean temperature (pd.Series)\n theta_e_monthly = self.weather_data.loc['Monthly', ('External Temperature', 'Mean')]\n\n # length of months (pd.Series)\n delta_t = pd.Series(data=SteadyStateCalculation.month_lengths, index=[str(i) for i in range(1, 13)])\n\n q_vent: pd.Series = h_vent * (theta_i - theta_e_monthly) * delta_t / 1000\n\n return q_vent # [kWh]\n\n def heat_energy_solar(self, zone: Zone, library: ObjectLibrary, heating=True) -> pd.Series:\n\n q_sd_zone = pd.Series(data=0, index=[str(i) for i in range(1, 13)])\n\n for surface in zone.BuildingSurfaces:\n for window in surface.Fenestration:\n construction = library.get(window.Construction)\n\n # g value of glazing system\n f_g = 0.9 # solar incident correction factor\n g_n = self.g_value(construction, library) # g factor for perpendicular radiation\n g_w = g_n * f_g\n\n # area of glazing\n a_w = window.glazing_area(mode='FrameWidth', frame_width=0.1) # TODO include frame width in model\n\n # g value of shading\n if window.Shading is not None:\n shading = library.get(window.Shading)\n g_sh = shading.ShadingFactor\n if not shading.IsScheduled:\n g_sh = 1\n if heating:\n g_sh = 1\n else:\n g_sh = 1\n\n # TODO f_s shading factor of external shading surfaces\n\n # total solar radiation energy (pd.Series)\n orientation = window.orientation()\n g_s = self.weather_data.loc['Monthly', ('Total Solar Radiation Energy', orientation)]\n\n q_sd = g_w * a_w * g_sh * g_s\n q_sd_zone += q_sd\n\n return q_sd_zone # [kWh]\n\n def heat_energy_internal(self, zone: Zone, hvac: HVAC) -> pd.Series:\n\n a_n = zone.heated_area()\n q_b = hvac.internal_gain\n\n # length of months (pd.Series)\n delta_t = pd.Series(data=SteadyStateCalculation.month_lengths, index=[str(i) for i in range(1, 13)])\n\n q_b_zone = a_n * q_b * delta_t / 1000\n return q_b_zone # kWh\n\n def gamma_tao_loss_gain(self, zone: Zone, hvac: HVAC, library: ObjectLibrary,\n heating: bool) -> Tuple[pd.Series, pd.Series, pd.Series, pd.Series]:\n \"\"\"\n Helper function to calculate gamma (loss/gain ratio) and tao (time factor)\n :param zone:\n :param hvac:\n :param library:\n :return: gamma, tao, loss, gain as pd.Series\n \"\"\"\n # Total loss in kWh\n q_loss = self.heat_energy_transmission(zone, hvac, library, heating=heating)\n q_loss += self.heat_energy_ventilation(zone, hvac, heating=heating)\n\n # Total gain in kWh\n q_gain = self.heat_energy_solar(zone, library, heating=heating)\n q_gain += self.heat_energy_internal(zone, hvac)\n\n # loss/gain ratio\n gamma: pd.Series = q_gain / q_loss\n\n # zone heat store capacity in [kJ/K]\n c_m_eff = self.heat_store_capacity(zone, library) / 1000\n\n h_tr_d = self.heat_transmission_direct(zone, library)\n h_tr_t = self.heat_transmission_ground(zone, library)\n h_vent = self.heat_ventilation(zone, hvac, heating=heating)\n\n # time factor [h] as pd.Series\n tao: pd.Series = c_m_eff / 3.6 / (h_tr_d + h_tr_t + h_vent)\n\n return gamma, tao, q_loss, q_gain\n\n def heating_demand(self, zone: Zone, hvac: HVAC, library: ObjectLibrary) -> pd.Series:\n \"\"\"\n Monthly net heating energy in [kWh]\n :param zone:\n :param hvac:\n :param library:\n :return: pd.Series with the monthly demand\n 
\"\"\"\n gamma_h, tao_h, q_loss, q_gain = self.gamma_tao_loss_gain(zone, hvac, library, heating=True)\n\n # numeric factors for monthly calculation in case of heating\n a_h_0 = 1\n tao_h_0 = 15\n # TODO for seasonal calculation:\n # a_h_0 = 0.8\n # tao_h_0 = 30\n\n a_h = a_h_0 + tao_h / tao_h_0 # pd.Series\n\n df = pd.concat([a_h, gamma_h, q_loss, q_gain], keys=['a', 'gamma', 'loss', 'gain'], axis='columns')\n\n # utilization factor\n def utilization_factor(gamma: float, a: float, gain: float):\n if gamma > 0 and gamma != 1:\n theta = (1 - gamma ** a) / (1 - gamma ** (a + 1))\n elif gamma == 1:\n theta = a / (a + 1)\n else: # gamma <= 0\n if gain > 0:\n theta = 1 / gamma\n else: # q_gain <=0\n theta = 1\n return theta\n\n # calculate for series:\n df['theta'] = df.apply(lambda row: utilization_factor(row['gamma'], row['a'], row['gain']), axis='columns')\n\n # net heating demand\n def net_demand(gamma: float, theta: float, loss: float, gain: float):\n if gamma <= 0 and gain > 0:\n demand = 0\n elif gamma > 2:\n demand = 0\n else:\n demand = loss - theta * gain\n\n if demand < 0:\n demand = 0\n\n return demand\n\n q_h_net = df.apply(lambda row: net_demand(row['gamma'], row['theta'], row['loss'], row['gain']), axis='columns')\n q_h_net.name = zone.Name\n\n return q_h_net\n\n def cooling_demand(self, zone: Zone, hvac: HVAC, library: ObjectLibrary):\n \"\"\"\n Monthly net cooling energy in [kWh]\n :param zone:\n :param hvac:\n :param library:\n :return: pd.Series with the monthly demand\n \"\"\"\n gamma_c, tao_c, q_loss, q_gain = self.gamma_tao_loss_gain(zone, hvac, library, heating=False)\n\n # numeric factors for monthly calculation in case of cooling\n a_c_0 = 1\n tao_c_0 = 15\n # TODO for seasonal calculation:\n # a_h_0 = 0.8\n # tao_h_0 = 30\n\n a_c = a_c_0 + tao_c / tao_c_0 # pd.Series\n\n df = pd.concat([a_c, gamma_c, q_loss, q_gain], keys=['a', 'gamma', 'loss', 'gain'], axis='columns')\n\n # utilization factor\n def utilization_factor(gamma: float, a: float):\n if gamma > 0 and gamma != 1:\n theta = (1 - gamma ** (-a)) / (1 - gamma ** (-(a + 1)))\n elif gamma == 1:\n theta = a / (a + 1)\n else: # gamma <= 0\n theta = 1\n return theta\n\n # calculate for series:\n df['theta'] = df.apply(lambda row: utilization_factor(row['gamma'], row['a']), axis='columns')\n\n # net heating demand\n def net_demand(gamma: float, theta: float, loss: float, gain: float):\n if 1 / gamma > 2:\n demand = 0\n else:\n demand = gain - theta * loss\n\n if demand < 0:\n demand = 0\n\n return demand\n\n q_c_net = df.apply(lambda row: net_demand(row['gamma'], row['theta'], row['loss'], row['gain']), axis='columns')\n q_c_net.name = zone.Name\n\n return q_c_net\n\n def lighting_demand(self, zone: Zone, hvac: HVAC):\n \"\"\"\n Lighting demand with simplified calculation\n :param zone:\n :param hvac:\n :return: Total lighting demand of zone in [kWh/year]\n \"\"\"\n\n # power density in W/m2\n p = hvac.Lighting.power_density\n f_fe = 1 # non-dimmable lights\n f_szab = 1\n t_nappal = 3000 # [h]\n t_ejjel = 2000 # [h]\n\n envelope_area = 0\n glazing_area = 0\n for surface in zone.BuildingSurfaces:\n if surface.OutsideBoundaryCondition.lower() == \"outdoors\" and surface.SurfaceType.lower() == 'wall':\n envelope_area += surface.area()\n for window in surface.Fenestration:\n glazing_area += window.glazing_area(mode='FrameWidth', frame_width=0.1)\n # TODO include frame width in model\n\n if envelope_area == 0:\n glazing_ratio = 0\n else:\n glazing_ratio = glazing_area / envelope_area\n\n if glazing_ratio > 0.8:\n f_nappal = 
0.56\n elif 0.8 > glazing_ratio > 0.4:\n f_nappal = 0.7\n else:\n f_nappal = 0.83\n\n w_vil = f_fe * p * f_szab * (t_nappal * f_nappal + t_ejjel) * zone.heated_area() / 1000\n\n return w_vil\n\n def calculate(self, building: Building) -> pd.DataFrame:\n \"\"\"\n Calculate heating cooling and lighting demand of building for each month\n :param building:\n :return: demands as pandas DataFrame\n \"\"\"\n\n heating = []\n cooling = []\n lights = []\n\n for zone in building.Zones:\n heating.append(self.heating_demand(zone=zone, hvac=building.HVAC, library=building.Library))\n cooling.append(self.cooling_demand(zone=zone, hvac=building.HVAC, library=building.Library))\n lights.append(self.lighting_demand(zone=zone, hvac=building.HVAC))\n\n heating_demand = pd.concat(heating, axis='columns').sum(axis='columns')\n cooling_demand = pd.concat(cooling, axis='columns').sum(axis='columns')\n lights_demand = sum(lights) # float (yearly demand)\n\n result = pd.concat([heating_demand, cooling_demand], axis='columns',\n keys=['heating', 'cooling'])\n result['lights'] = lights_demand / 12 # (monthly)\n return result" ]
[ [ "pandas.concat", "pandas.MultiIndex.from_tuples", "pandas.read_json", "pandas.DataFrame" ] ]
DamLabResources/hiv-transformers
[ "fb44f73e542c54974489cd1fa59fdadbf60d5e72" ]
[ "workflow/wrappers/huggingface_train/wrapper.py" ]
[ "from yaml import full_load, dump\nfrom datasets import load_dataset, DatasetDict, ClassLabel, Array2D\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support, roc_auc_score, mean_absolute_error, max_error, r2_score\nfrom sklearn.preprocessing import label_binarize\nfrom transformers import TrainingArguments\nfrom transformers import EarlyStoppingCallback, AutoModelForSequenceClassification\nimport torch\nimport os\nfrom scipy.special import softmax\nfrom collections import Counter\nimport numpy as np\nimport pandas as pd\n\nfrom common import CustomTrainer, categorical_metrics_factory, multiclass_metrics_factory, regression_metrics_factory\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\ndataset_path = snakemake.input['dataset']\nmeta = snakemake.params['meta']\n\n\n# Get the model and init function\npretrained = snakemake.input.get('pretrained', None)\nif pretrained is None:\n pretrained = snakemake.params.get('pretrained', None)\n\nif pretrained is None:\n pretrained = meta.get('pretrained', None)\n\nassert pretrained is not None, 'pretrained must be specified in either input, params, or in the model meta'\n\n\ndataset = DatasetDict.load_from_disk(dataset_path)\nif type(dataset['train'].features['labels']) == ClassLabel:\n # Classification task\n\n multi_class = False\n\n id2label = dict((n, label) for n, label in enumerate(\n dataset['train'].features['labels'].names))\n label2id = dict((label, n) for n, label in enumerate(\n dataset['train'].features['labels'].names))\n num_labels = dataset['train'].features['labels'].num_classes\n\n labels = pd.Series(dataset['train']['labels'])\n label_counts = labels.value_counts().reindex(range(num_labels))\n train_weights = 1-(label_counts.values/len(labels)).astype(np.float32)\n\n typ = 'categorical'\n metrics = categorical_metrics_factory(id2label)\n\nelif type(dataset['train'].features['labels']) == Array2D:\n\n multi_class = True\n\n num_labels = dataset['train'].features['labels'].shape[1]\n id2label = meta['id2label']\n label2id = dict((val, key) for key, val in id2label.items())\n\n labels = np.array(dataset['train']['labels'])\n train_weights = (labels == 1).sum()/(labels == 0).sum()\n\n typ = 'multi_class'\n metrics = multiclass_metrics_factory(id2label)\n\nelse:\n # Regression task\n name = meta.get('task', 'raw_value')\n id2label = {0: name}\n label2id = {name: 0}\n num_labels = 1\n train_weights = None\n\n typ = 'regression'\n metrics = regression_metrics_factory(id2label)\n\n\ndef model_init():\n return AutoModelForSequenceClassification.from_pretrained(pretrained,\n num_labels=num_labels,\n label2id=label2id,\n id2label=id2label)\n\n\n# Grab parameters for training\nEPOCHS = snakemake.params.get('epochs', 200)\n\ncallbacks = []\nif snakemake.params.get('early_stopping', True):\n callbacks.append(EarlyStoppingCallback(\n early_stopping_patience=snakemake.params.get('early_stopping', 3)))\n\n# Best trained model\nout_model = snakemake.output.get('model', None)\n\n# Metric\nout_metric = snakemake.output['metric']\n\nkeep_cols = {'labels', 'attention_mask', 'input_ids', 'token_type_ids'}\nrm_cols = [col for col in dataset.column_names['train']\n if col not in keep_cols]\n\ntrim_dataset = dataset.remove_columns(rm_cols)\n\n# Based on Rostlab github example.\ntraining_args = TrainingArguments(\"test_trainer\",\n evaluation_strategy='epoch',\n load_best_model_at_end=True,\n save_strategy='epoch',\n logging_first_step=True,\n logging_steps=10,\n num_train_epochs=EPOCHS,\n warmup_steps=50,\n weight_decay=0.01,\n 
gradient_accumulation_steps=64,\n lr_scheduler_type='cosine_with_restarts',\n )\n\ntrainer = CustomTrainer(\n model_init=model_init,\n args=training_args,\n train_dataset=trim_dataset['train'],\n eval_dataset=trim_dataset['test'],\n callbacks=callbacks,\n compute_metrics=metrics,\n class_weights=train_weights,\n train_type=typ\n)\n\nresults = trainer.train()\n\nif out_model is not None:\n trainer.save_model(out_model)\n\ntest_predictions = trainer.predict(trim_dataset['test'])\n\nwith open(out_metric, 'w') as handle:\n dump(test_predictions.metrics, handle)" ]
[ [ "numpy.array", "pandas.Series" ] ]
JoaoCarabetta/PyMove
[ "0b712a9b65e0a5666db4bfecee3cd038ed155f7d" ]
[ "pymove/core/grid.py" ]
[ "import math\nfrom typing import Callable, Dict, Optional, Text, Tuple, Union\n\nimport joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.pyplot import figure\nfrom pandas import DataFrame\nfrom shapely.geometry import Polygon\n\nfrom pymove.utils.constants import (\n DATETIME,\n INDEX_GRID,\n INDEX_GRID_LAT,\n INDEX_GRID_LON,\n LATITUDE,\n LONGITUDE,\n POLYGON,\n TRAJ_ID,\n)\nfrom pymove.utils.conversions import lat_meters\nfrom pymove.utils.log import progress_bar\nfrom pymove.utils.mem import begin_operation, end_operation\n\n\nclass Grid:\n def __init__(\n self,\n data: Union[DataFrame, Dict],\n cell_size: Optional[float] = None,\n meters_by_degree: Optional[float] = lat_meters(-3.8162973555)\n ):\n \"\"\"\n Creates a virtual grid from the trajectories.\n\n Parameters\n ----------\n data : DataFrame or dict\n Dataframe containing the trajectories\n Dict with grid information\n 'lon_min_x': minimum x of grid,\n 'lat_min_y': minimum y of grid,\n 'grid_size_lat_y': lat y size of grid,\n 'grid_size_lon_x': lon x size of grid,\n 'cell_size_by_degree': cell size in radians,\n cell_size : float, optional\n Represents grid cell size, by default None\n meters_by_degree : float, optional\n Represents the corresponding meters of lat by degree,\n by default lat_meters(-3.8162973555)\n \"\"\"\n self.last_operation = None\n if isinstance(data, dict):\n self._grid_from_dict(data)\n else:\n self._create_virtual_grid(data, cell_size, meters_by_degree)\n self.grid_polygon = None\n\n def get_grid(self) -> Dict:\n \"\"\"\n Returns the grid object in a dict format.\n\n Returns\n -------\n Dict\n Dict with grid information\n 'lon_min_x': minimum x of grid,\n 'lat_min_y': minimum y of grid,\n 'grid_size_lat_y': lat y size of grid,\n 'grid_size_lon_x': lon x size of grid,\n 'cell_size_by_degree': cell size in radians\n \"\"\"\n return {\n 'lon_min_x': self.lon_min_x,\n 'lat_min_y': self.lat_min_y,\n 'grid_size_lat_y': self.grid_size_lat_y,\n 'grid_size_lon_x': self.grid_size_lon_x,\n 'cell_size_by_degree': self.cell_size_by_degree,\n }\n\n def _grid_from_dict(self, dict_grid: Dict):\n \"\"\"\n Coverts the dict grid to a Grid object.\n\n Parameters\n ----------\n dict_grid : dict\n Dictionary with grid information\n 'lon_min_x': minimum x of grid,\n 'lat_min_y': minimum y of grid,\n 'grid_size_lat_y': lat y size of grid,\n 'grid_size_lon_x': lon x size of grid,\n 'cell_size_by_degree': cell size in radians,\n \"\"\"\n self.lon_min_x = dict_grid['lon_min_x']\n self.lat_min_y = dict_grid['lat_min_y']\n self.grid_size_lat_y = dict_grid['grid_size_lat_y']\n self.grid_size_lon_x = dict_grid['grid_size_lon_x']\n self.cell_size_by_degree = dict_grid['cell_size_by_degree']\n\n def _create_virtual_grid(\n self, data: DataFrame, cell_size: float, meters_by_degree: float\n ):\n \"\"\"\n Create a virtual grid based in dataset bound box.\n\n Parameters\n ----------\n data : DataFrame\n Represents the dataset with contains lat, long and datetime\n cell_size : float\n Size of grid cell\n meters_by_degree : float\n Represents the meters degree of latitude\n\n \"\"\"\n\n operation = begin_operation('_create_virtual_grid')\n\n bbox = data.get_bbox()\n print('\\nCreating a virtual grid without polygons')\n\n # Latitude in Fortaleza: -3.8162973555\n cell_size_by_degree = cell_size / meters_by_degree\n print('...cell size by degree: %s' % cell_size_by_degree)\n\n lat_min_y = bbox[0]\n lon_min_x = bbox[1]\n lat_max_y = bbox[2]\n lon_max_x = bbox[3]\n\n # If cell size does not fit in the grid area, an 
expansion is made\n if math.fmod((lat_max_y - lat_min_y), cell_size_by_degree) != 0:\n lat_max_y = lat_min_y + cell_size_by_degree * (\n math.floor((lat_max_y - lat_min_y) / cell_size_by_degree) + 1\n )\n\n if math.fmod((lon_max_x - lon_min_x), cell_size_by_degree) != 0:\n lon_max_x = lon_min_x + cell_size_by_degree * (\n math.floor((lon_max_x - lon_min_x) / cell_size_by_degree) + 1\n )\n\n # adjust grid size to lat and lon\n grid_size_lat_y = int(\n round((lat_max_y - lat_min_y) / cell_size_by_degree)\n )\n grid_size_lon_x = int(\n round((lon_max_x - lon_min_x) / cell_size_by_degree)\n )\n\n print(\n '...grid_size_lat_y:%s\\ngrid_size_lon_x:%s'\n % (grid_size_lat_y, grid_size_lon_x)\n )\n\n self.lon_min_x = lon_min_x\n self.lat_min_y = lat_min_y\n self.grid_size_lat_y = grid_size_lat_y\n self.grid_size_lon_x = grid_size_lon_x\n self.cell_size_by_degree = cell_size_by_degree\n print('\\n..A virtual grid was created')\n\n self.last_operation = end_operation(operation)\n\n def create_update_index_grid_feature(\n self,\n data: DataFrame,\n unique_index: Optional[bool] = True,\n label_dtype: Optional[Callable] = np.int64,\n sort: Optional[bool] = True\n ):\n \"\"\"\n Create or update index grid feature. It not necessary pass dic_grid,\n because if don't pass, the function create a dic_grid.\n\n Parameters\n ----------\n data : DataFrame\n Represents the dataset with contains lat, long and datetime.\n unique_index: bool, optional\n How to index the grid, by default True\n label_dtype : Optional[Callable], optional\n Represents the type of a value of new column in dataframe, by default np.int64\n sort : bool, optional\n Represents if needs to sort the dataframe, by default True\n\n \"\"\"\n\n operation = begin_operation('create_update_index_grid_feature')\n\n print('\\nCreating or updating index of the grid feature..\\n')\n try:\n if sort:\n data.sort_values([TRAJ_ID, DATETIME], inplace=True)\n lat_, lon_ = self.point_to_index_grid(\n data[LATITUDE], data[LONGITUDE]\n )\n lat_, lon_ = label_dtype(lat_), label_dtype(lon_)\n dict_grid = self.get_grid()\n if unique_index:\n data[INDEX_GRID] = lon_ * dict_grid['grid_size_lat_y'] + lat_\n else:\n data[INDEX_GRID_LAT] = lat_\n data[INDEX_GRID_LON] = lon_\n self.last_operation = end_operation(operation)\n except Exception as e:\n self.last_operation = end_operation(operation)\n raise e\n\n def convert_two_index_grid_to_one(\n self,\n data: DataFrame,\n label_grid_lat: Optional[Text] = INDEX_GRID_LAT,\n label_grid_lon: Optional[Text] = INDEX_GRID_LON,\n ):\n \"\"\"\n Converts grid lat-lon ids to unique values\n\n Parameters\n ----------\n data : DataFrame\n Dataframe with grid lat-lon ids\n label_grid_lat : str, optional\n grid lat id column, by default INDEX_GRID_LAT\n label_grid_lon : str, optional\n grid lon id column, by default INDEX_GRID_LON\n \"\"\"\n dict_grid = self.get_grid()\n data[INDEX_GRID] = (\n data[label_grid_lon] * dict_grid['grid_size_lat_y'] + data[label_grid_lat]\n )\n\n def convert_one_index_grid_to_two(\n self,\n data: DataFrame,\n label_grid_index: Optional[Text] = INDEX_GRID,\n ):\n \"\"\"\n Converts grid lat-lon ids to unique values\n\n Parameters\n ----------\n data : DataFrame\n Dataframe with grid lat-lon ids\n label_grid_index : str, optional\n grid unique id column, by default INDEX_GRID\n \"\"\"\n dict_grid = self.get_grid()\n data[INDEX_GRID_LAT] = data[label_grid_index] % dict_grid['grid_size_lat_y']\n data[INDEX_GRID_LON] = data[label_grid_index] // dict_grid['grid_size_lat_y']\n\n def 
create_one_polygon_to_point_on_grid(\n self, index_grid_lat: int, index_grid_lon: int\n ) -> Polygon:\n \"\"\"\n Create one polygon to point on grid.\n\n Parameters\n ----------\n index_grid_lat : int\n Represents index of grid that reference latitude.\n index_grid_lon : int\n Represents index of grid that reference longitude.\n\n Returns\n -------\n Polygon\n Represents a polygon of this cell in a grid.\n\n \"\"\"\n\n operation = begin_operation('create_one_polygon_to_point_on_grid')\n\n cell_size = self.cell_size_by_degree\n lat_init = self.lat_min_y + cell_size * index_grid_lat\n lon_init = self.lon_min_x + cell_size * index_grid_lon\n polygon = Polygon((\n (lon_init, lat_init),\n (lon_init, lat_init + cell_size),\n (lon_init + cell_size, lat_init + cell_size),\n (lon_init + cell_size, lat_init)\n ))\n self.last_operation = end_operation(operation)\n\n return polygon\n\n def create_all_polygons_on_grid(self):\n \"\"\"\n Create all polygons that are represented in a grid and store them in a\n new dic_grid key .\n\n \"\"\"\n\n operation = begin_operation('create_all_polygons_on_grid')\n\n print('\\nCreating all polygons on virtual grid', flush=True)\n grid_polygon = np.array(\n [\n [None for _ in range(self.grid_size_lon_x)]\n for _ in range(self.grid_size_lat_y)\n ]\n )\n lat_init = self.lat_min_y\n cell_size = self.cell_size_by_degree\n for i in progress_bar(range(self.grid_size_lat_y)):\n lon_init = self.lon_min_x\n for j in range(self.grid_size_lon_x):\n # Cria o polygon da célula\n grid_polygon[i][j] = Polygon((\n (lon_init, lat_init),\n (lon_init, lat_init + cell_size),\n (lon_init + cell_size, lat_init + cell_size),\n (lon_init + cell_size, lat_init)\n ))\n lon_init += cell_size\n lat_init += cell_size\n self.grid_polygon = grid_polygon\n print('...geometries saved on Grid grid_polygon property')\n self.last_operation = end_operation(operation)\n\n def create_all_polygons_to_all_point_on_grid(\n self, data: DataFrame\n ) -> DataFrame:\n \"\"\"\n Create all polygons to all points represented in a grid.\n\n Parameters\n ----------\n data : DataFrame\n Represents the dataset with contains lat, long and datetime\n\n Returns\n -------\n DataFrame\n Represents the same dataset with new key 'polygon'\n where polygons were saved.\n\n \"\"\"\n\n operation = begin_operation('create_all_polygons_to_all_point_on_grid')\n if INDEX_GRID_LAT not in data or INDEX_GRID_LON not in data:\n self.create_update_index_grid_feature(data, unique_index=False)\n\n datapolygons = data[[TRAJ_ID, INDEX_GRID_LAT, INDEX_GRID_LON]].drop_duplicates()\n\n polygons = datapolygons.apply(\n lambda row: self.create_one_polygon_to_point_on_grid(\n row[INDEX_GRID_LAT], row[INDEX_GRID_LON]\n ), axis=1\n )\n\n print('...polygons were created')\n datapolygons['polygon'] = polygons\n self.last_operation = end_operation(operation)\n return datapolygons\n\n def point_to_index_grid(self, event_lat: float, event_lon: float) -> Tuple[int, int]:\n \"\"\"\n Locate the coordinates x and y in a grid of point (lat, long).\n\n Parameters\n ----------\n event_lat : float\n Represents the latitude of a point\n event_lon : float\n Represents the longitude of a point\n\n Returns\n -------\n Tuple[int, int]\n Represents the index y in a grid of a point (lat, long)\n Represents the index x in a grid of a point (lat, long)\n\n \"\"\"\n\n operation = begin_operation('create_all_polygons_to_all_point_on_grid')\n\n indexes_lat_y = np.floor(\n (np.float64(event_lat) - self.lat_min_y) / self.cell_size_by_degree\n )\n indexes_lon_x = np.floor(\n 
(np.float64(event_lon) - self.lon_min_x) / self.cell_size_by_degree\n )\n print(\n '...[%s,%s] indexes were created to lat and lon'\n % (indexes_lat_y.size, indexes_lon_x.size)\n )\n self.last_operation = end_operation(operation)\n\n return indexes_lat_y, indexes_lon_x\n\n def save_grid_pkl(self, filename: Text):\n \"\"\"\n Save a grid with new file .pkl.\n\n Parameters\n ----------\n filename : Text\n Represents the name of a file.\n\n \"\"\"\n\n operation = begin_operation('save_grid_pkl')\n with open(filename, 'wb') as f:\n joblib.dump(self.get_grid(), f)\n self.last_operation = end_operation(operation)\n\n def read_grid_pkl(self, filename: Text) -> 'Grid':\n \"\"\"\n Read grid dict from a file .pkl.\n\n Parameters\n ----------\n filename : str\n Represents the name of a file.\n\n Returns\n -------\n Grid\n Grid object containing informations about virtual grid\n\n \"\"\"\n operation = begin_operation('read_grid_pkl')\n with open(filename, 'rb') as f:\n dict_grid = joblib.load(f)\n grid = Grid(data=dict_grid)\n self.last_operation = end_operation(operation)\n return grid\n\n def show_grid_polygons(\n self,\n data: DataFrame,\n markersize: Optional[float] = 10,\n linewidth: Optional[float] = 2,\n figsize: Optional[Tuple[int, int]] = (10, 10),\n return_fig: Optional[bool] = True,\n save_fig: Optional[bool] = False,\n name: Optional[Text] = 'grid.png',\n ) -> Optional[figure]:\n \"\"\"\n Generate a visualization with grid polygons.\n\n Parameters\n ----------\n data : DataFrame\n Input trajectory data\n markersize : float, optional\n Represents visualization size marker, by default 10\n linewidth : float, optional\n Represents visualization size line, by default 2\n figsize : tuple(int, int), optional\n Represents the size (float: width, float: height) of a figure,\n by default (10, 10)\n return_fig : bool, optional\n Represents whether or not to save the generated picture, by default True\n save_fig : bool, optional\n Wether to save the figure, by default False\n name : str, optional\n Represents name of a file, by default 'grid.png'\n\n Returns\n -------\n Optional[figure]\n The generated picture or None\n\n Raises\n ------\n If the dataframe does not contains the POLYGON feature\n IndexError\n If there is no user with the id passed\n\n \"\"\"\n if POLYGON not in data:\n raise KeyError('POLYGON feature not in dataframe')\n\n data.dropna(subset=[POLYGON], inplace=True)\n\n operation = begin_operation('show_grid_polygons')\n\n fig = plt.figure(figsize=figsize)\n\n for _, row in data.iterrows():\n xs, ys = row[POLYGON].exterior.xy\n plt.plot(ys, xs, 'g', linewidth=linewidth, markersize=markersize)\n xs_start, ys_start = data.iloc[0][POLYGON].exterior.xy\n xs_end, ys_end = data.iloc[-1][POLYGON].exterior.xy\n plt.plot(ys_start, xs_start, 'bo', markersize=markersize * 1.5)\n plt.plot(ys_end, xs_end, 'bX', markersize=markersize * 1.5) # start point\n\n if save_fig:\n plt.savefig(fname=name, fig=fig)\n\n self.last_operation = end_operation(operation)\n\n if return_fig:\n return fig\n\n def __repr__(self) -> str:\n \"\"\"\n String representation of grid\n\n Returns\n -------\n str\n lon_min_x: min longitude\n lat_min_y: min latitude\n grid_size_lat_y: grid latitude size\n grid_size_lon_x: grid longitude size\n cell_size_by_degree: grid cell size\n \"\"\"\n text = ['{}: {}'.format(k, v) for k, v in self.get_grid().items()]\n return '\\n'.join(text)\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.float64", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]