Schema: repo_name (string, length 6-130), hexsha (list), file_path (list), code (list), apis (list)
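Each record below pairs a repository (repo_name) with parallel lists: one commit SHA (hexsha), file path (file_path), source string (code), and detected-API list (apis) per file. A minimal sketch of how such records could be iterated, assuming they are stored as JSON Lines with the schema above (the file name records.jsonl and the helper name are hypothetical, not part of the dataset):

import json

def iter_file_records(path):
    # Yield (repo_name, file_path, code, apis) tuples from a JSON Lines dump.
    # Assumes hexsha, file_path, code and apis are parallel lists, one entry per file.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            for file_path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
                yield record["repo_name"], file_path, code, apis

# Example usage (hypothetical file name):
# for repo, fpath, code, apis in iter_file_records("records.jsonl"):
#     print(repo, fpath, len(code), apis)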
dataplumber/nexus
[ "f25a89e85eba098da9c6db1ff3d408dae8a6b310", "f25a89e85eba098da9c6db1ff3d408dae8a6b310" ]
[ "analysis/webservice/Filtering.py", "analysis/webservice/NexusHandler.py" ]
[ "\"\"\"\nCopyright (c) 2016 Jet Propulsion Laboratory,\nCalifornia Institute of Technology. All rights reserved\n\"\"\"\n\nimport math\n\nimport logging\nimport traceback\n\nimport numpy as np\nfrom scipy import stats\nfrom scipy.fftpack import fft\nfrom scipy.ndimage.interpolation import zoom\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.signal import wiener, filtfilt, butter, gaussian, freqz\nfrom scipy.ndimage import filters\n\nlog = logging.getLogger('Filtering')\n\n\ndef __fieldToList(results, field):\n a = np.zeros(len(results))\n for n in range(0, len(results)):\n a[n] = results[n][field]\n return a\n\n\ndef __listToField(results, l, field):\n if results is None or l is None:\n raise Exception(\"Cannot transpose values if they're null\")\n\n if not len(results) == len(l):\n raise Exception(\"Cannot transpose values between lists of inequal length\")\n\n for n in range(0, len(results)):\n results[n][field] = l[n]\n\n\ndef applySeasonalCycleFilter1d(l):\n if len(l) <= 12:\n return l\n\n for a in range(0, 12):\n values = []\n for b in range(a, len(l), 12):\n values.append(l[b])\n avg = np.average(values)\n for b in range(a, len(l), 12):\n l[b] -= avg\n return l\n\n\ndef applySeasonalCycleFilter2d(l):\n return l\n\n\n'''\n Implements monthly filtering of seasonal cycles.\n'''\n\n\ndef applySeasonalCycleFilter(l):\n if len(np.shape(l)) == 1:\n return applySeasonalCycleFilter1d(l)\n elif len(np.shape(l)) == 2:\n return applySeasonalCycleFilter2d(l)\n else:\n raise Exception(\"Cannot apply seasonal cycle filter: Unsupported array shape\")\n\n\ndef applySeasonalCycleFilterOnResultsField(results, field):\n l = __fieldToList(results, field)\n applySeasonalCycleFilter(l)\n __listToField(results, l, field)\n\n\ndef applySeasonalCycleFilterOnResults(results):\n [applySeasonalCycleFilterOnResultsField(results, field) for field in ['mean', 'max', 'min']]\n\n\n'''\nhttp://www.nehalemlabs.net/prototype/blog/2013/04/05/an-introduction-to-smoothing-time-series-in-python-part-i-filtering-theory/\n'''\n\n\ndef applyLowPassFilter(y, lowcut=12.0, order=9.0):\n if len(y) - 12 <= lowcut:\n lowcut = 3\n nyq = 0.5 * len(y)\n low = lowcut / nyq\n # high = highcut / nyq\n b, a = butter(order, low)\n m = min([len(y), len(a), len(b)])\n padlen = 30 if m >= 30 else m\n fl = filtfilt(b, a, y, padlen=padlen)\n return fl\n\n\ndef applyFiltersOnField(results, field, applySeasonal=False, applyLowPass=False, append=\"\"):\n x = __fieldToList(results, field)\n\n if applySeasonal:\n x = applySeasonalCycleFilter(x)\n if applyLowPass:\n x = applyLowPassFilter(x)\n __listToField(results, x, \"%s%s\" % (field, append))\n\n\ndef applyAllFiltersOnField(results, field, applySeasonal=True, applyLowPass=True):\n try:\n if applySeasonal:\n applyFiltersOnField(results, field, applySeasonal=True, applyLowPass=False, append=\"Seasonal\")\n except Exception as e:\n # If it doesn't work log the error but ignore it\n tb = traceback.format_exc()\n log.warn(\"Error calculating Seasonal filter:\\n%s\" % tb)\n\n try:\n if applyLowPass:\n applyFiltersOnField(results, field, applySeasonal=False, applyLowPass=True, append=\"LowPass\")\n except Exception as e:\n # If it doesn't work log the error but ignore it\n tb = traceback.format_exc()\n log.warn(\"Error calculating LowPass filter:\\n%s\" % tb)\n\n try:\n if applySeasonal and applyLowPass:\n applyFiltersOnField(results, field, applySeasonal=True, applyLowPass=True, append=\"SeasonalLowPass\")\n except Exception as e:\n # If it doesn't work log the error but ignore it\n tb = 
traceback.format_exc()\n log.warn(\"Error calculating SeasonalLowPass filter:\\n%s\" % tb)\n\n\n'''\nclass ResultsFilter(object):\n\n def __init__(self):\n pass\n\n def filter(self, results, append, **kwargs):\n pass\n\n\n\nclass SeasonalCycleFilter(ResultsFilter):\n\n def filter(self, results, append, **kwargs):\n [applySeasonalCycleFilterOnResultsField(results, field) for field in ['mean', 'max', 'min']]\n\nif __name__ == \"__main__\":\n\n foo = \"bar\"\n f = ResultsFilter()\n f.test(\"Tester\", blah=foo)\n'''\n", "\"\"\"\nCopyright (c) 2016 Jet Propulsion Laboratory,\nCalifornia Institute of Technology. All rights reserved\n\"\"\"\nimport sys\nimport numpy as np\nimport logging\nimport time\nimport types\nfrom datetime import datetime\nfrom netCDF4 import Dataset\nfrom nexustiles.nexustiles import NexusTileService\nfrom webservice.webmodel import NexusProcessingException\n\nAVAILABLE_HANDLERS = []\nAVAILABLE_INITIALIZERS = []\n\n\ndef nexus_initializer(clazz):\n log = logging.getLogger(__name__)\n try:\n wrapper = NexusInitializerWrapper(clazz)\n log.info(\"Adding initializer '%s'\" % wrapper.clazz())\n AVAILABLE_INITIALIZERS.append(wrapper)\n except Exception as ex:\n log.warn(\"Initializer '%s' failed to load (reason: %s)\" % (clazz, ex.message), exc_info=True)\n return clazz\n\n\ndef nexus_handler(clazz):\n log = logging.getLogger(__name__)\n try:\n wrapper = AlgorithmModuleWrapper(clazz)\n log.info(\"Adding algorithm module '%s' with path '%s' (%s)\" % (wrapper.name(), wrapper.path(), wrapper.clazz()))\n AVAILABLE_HANDLERS.append(wrapper)\n except Exception as ex:\n log.warn(\"Handler '%s' is invalid and will be skipped (reason: %s)\" % (clazz, ex.message), exc_info=True)\n return clazz\n\n\nDEFAULT_PARAMETERS_SPEC = {\n \"ds\": {\n \"name\": \"Dataset\",\n \"type\": \"string\",\n \"description\": \"One or more comma-separated dataset shortnames\"\n },\n \"minLat\": {\n \"name\": \"Minimum Latitude\",\n \"type\": \"float\",\n \"description\": \"Minimum (Southern) bounding box Latitude\"\n },\n \"maxLat\": {\n \"name\": \"Maximum Latitude\",\n \"type\": \"float\",\n \"description\": \"Maximum (Northern) bounding box Latitude\"\n },\n \"minLon\": {\n \"name\": \"Minimum Longitude\",\n \"type\": \"float\",\n \"description\": \"Minimum (Western) bounding box Longitude\"\n },\n \"maxLon\": {\n \"name\": \"Maximum Longitude\",\n \"type\": \"float\",\n \"description\": \"Maximum (Eastern) bounding box Longitude\"\n },\n \"startTime\": {\n \"name\": \"Start Time\",\n \"type\": \"long integer\",\n \"description\": \"Starting time in milliseconds since midnight Jan. 1st, 1970 UTC\"\n },\n \"endTime\": {\n \"name\": \"End Time\",\n \"type\": \"long integer\",\n \"description\": \"Ending time in milliseconds since midnight Jan. 
1st, 1970 UTC\"\n },\n \"lowPassFilter\": {\n \"name\": \"Apply Low Pass Filter\",\n \"type\": \"boolean\",\n \"description\": \"Specifies whether to apply a low pass filter on the analytics results\"\n },\n \"seasonalFilter\": {\n \"name\": \"Apply Seasonal Filter\",\n \"type\": \"boolean\",\n \"description\": \"Specified whether to apply a seasonal cycle filter on the analytics results\"\n }\n}\n\n\nclass NexusInitializerWrapper:\n def __init__(self, clazz):\n self.__log = logging.getLogger(__name__)\n self.__hasBeenRun = False\n self.__clazz = clazz\n self.validate()\n\n def validate(self):\n if \"init\" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__[\"init\"]) == types.FunctionType:\n raise Exception(\"Method 'init' has not been declared\")\n\n def clazz(self):\n return self.__clazz\n\n def hasBeenRun(self):\n return self.__hasBeenRun\n\n def init(self, config):\n if not self.__hasBeenRun:\n self.__hasBeenRun = True\n instance = self.__clazz()\n instance.init(config)\n else:\n self.log(\"Initializer '%s' has already been run\" % self.__clazz)\n\n\nclass AlgorithmModuleWrapper:\n def __init__(self, clazz):\n self.__instance = None\n self.__clazz = clazz\n self.validate()\n\n def validate(self):\n if \"calc\" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__[\"calc\"]) == types.FunctionType:\n raise Exception(\"Method 'calc' has not been declared\")\n\n if \"path\" not in self.__clazz.__dict__:\n raise Exception(\"Property 'path' has not been defined\")\n\n if \"name\" not in self.__clazz.__dict__:\n raise Exception(\"Property 'name' has not been defined\")\n\n if \"description\" not in self.__clazz.__dict__:\n raise Exception(\"Property 'description' has not been defined\")\n\n if \"params\" not in self.__clazz.__dict__:\n raise Exception(\"Property 'params' has not been defined\")\n\n def clazz(self):\n return self.__clazz\n\n def name(self):\n return self.__clazz.name\n\n def path(self):\n return self.__clazz.path\n\n def description(self):\n return self.__clazz.description\n\n def params(self):\n return self.__clazz.params\n\n def instance(self, algorithm_config=None, sc=None):\n if \"singleton\" in self.__clazz.__dict__ and self.__clazz.__dict__[\"singleton\"] is True:\n if self.__instance is None:\n self.__instance = self.__clazz()\n\n try:\n self.__instance.set_config(algorithm_config)\n except AttributeError:\n pass\n\n try:\n self.__instance.set_spark_context(sc)\n except AttributeError:\n pass\n\n return self.__instance\n else:\n instance = self.__clazz()\n\n try:\n instance.set_config(algorithm_config)\n except AttributeError:\n pass\n\n try:\n self.__instance.set_spark_context(sc)\n except AttributeError:\n pass\n return instance\n\n def isValid(self):\n try:\n self.validate()\n return True\n except Exception as ex:\n return False\n\n\nclass CalcHandler(object):\n def calc(self, computeOptions, **args):\n raise Exception(\"calc() not yet implemented\")\n\n\nclass NexusHandler(CalcHandler):\n def __init__(self, skipCassandra=False, skipSolr=False):\n CalcHandler.__init__(self)\n\n self.algorithm_config = None\n self._tile_service = NexusTileService(skipCassandra, skipSolr)\n\n def set_config(self, algorithm_config):\n self.algorithm_config = algorithm_config\n\n def _mergeDicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z\n\n def _now(self):\n millis = int(round(time.time() * 1000))\n return millis\n\n def _mergeDataSeries(self, resultsData, dataNum, resultsMap):\n\n for entry in resultsData:\n\n #frmtdTime = 
datetime.fromtimestamp(entry[\"time\"] ).strftime(\"%Y-%m\")\n frmtdTime = entry[\"time\"]\n\n if not frmtdTime in resultsMap:\n resultsMap[frmtdTime] = []\n entry[\"ds\"] = dataNum\n resultsMap[frmtdTime].append(entry)\n\n def _resultsMapToList(self, resultsMap):\n resultsList = []\n for key, value in resultsMap.iteritems():\n resultsList.append(value)\n\n resultsList = sorted(resultsList, key=lambda entry: entry[0][\"time\"])\n return resultsList\n\n def _mergeResults(self, resultsRaw):\n resultsMap = {}\n\n for i in range(0, len(resultsRaw)):\n resultsSeries = resultsRaw[i]\n resultsData = resultsSeries[0]\n self._mergeDataSeries(resultsData, i, resultsMap)\n\n resultsList = self._resultsMapToList(resultsMap)\n return resultsList\n\n\nclass SparkHandler(NexusHandler):\n class SparkJobContext(object):\n\n class MaxConcurrentJobsReached(Exception):\n def __init__(self, *args, **kwargs):\n Exception.__init__(self, *args, **kwargs)\n\n def __init__(self, job_stack):\n self.spark_job_stack = job_stack\n self.job_name = None\n self.log = logging.getLogger(__name__)\n\n def __enter__(self):\n try:\n self.job_name = self.spark_job_stack.pop()\n self.log.debug(\"Using %s\" % self.job_name)\n except IndexError:\n raise SparkHandler.SparkJobContext.MaxConcurrentJobsReached()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.job_name is not None:\n self.log.debug(\"Returning %s\" % self.job_name)\n self.spark_job_stack.append(self.job_name)\n\n def __init__(self, **kwargs):\n import inspect\n NexusHandler.__init__(self, **kwargs)\n self._sc = None\n\n self.spark_job_stack = []\n\n def with_spark_job_context(calc_func):\n from functools import wraps\n\n @wraps(calc_func)\n def wrapped(*args, **kwargs1):\n try:\n with SparkHandler.SparkJobContext(self.spark_job_stack) as job_context:\n # TODO Pool and Job are forced to a 1-to-1 relationship\n calc_func.im_self._sc.setLocalProperty(\"spark.scheduler.pool\", job_context.job_name)\n calc_func.im_self._sc.setJobGroup(job_context.job_name, \"a spark job\")\n return calc_func(*args, **kwargs1)\n except SparkHandler.SparkJobContext.MaxConcurrentJobsReached:\n raise NexusProcessingException(code=503,\n reason=\"Max concurrent requests reached. 
Please try again later.\")\n\n return wrapped\n\n for member in inspect.getmembers(self, predicate=inspect.ismethod):\n if member[0] == \"calc\":\n setattr(self, member[0], with_spark_job_context(member[1]))\n\n def set_spark_context(self, sc):\n self._sc = sc\n\n def set_config(self, algorithm_config):\n max_concurrent_jobs = algorithm_config.getint(\"spark\", \"maxconcurrentjobs\") if algorithm_config.has_section(\n \"spark\") and algorithm_config.has_option(\"spark\", \"maxconcurrentjobs\") else 10\n self.spark_job_stack = list([\"Job %s\" % x for x in xrange(1, max_concurrent_jobs + 1)])\n self.algorithm_config = algorithm_config\n\n def _setQueryParams(self, ds, bounds, start_time=None, end_time=None,\n start_year=None, end_year=None, clim_month=None,\n fill=-9999., spark_master=None, spark_nexecs=None,\n spark_nparts=None):\n self._ds = ds\n self._minLat, self._maxLat, self._minLon, self._maxLon = bounds\n self._startTime = start_time\n self._endTime = end_time\n self._startYear = start_year\n self._endYear = end_year\n self._climMonth = clim_month\n self._fill = fill\n self._spark_master = spark_master\n self._spark_nexecs = spark_nexecs\n self._spark_nparts = spark_nparts\n\n def _find_global_tile_set(self):\n if type(self._ds) in (list,tuple):\n ds = self._ds[0]\n else:\n ds = self._ds\n ntiles = 0\n ##################################################################\n # Temporary workaround until we have dataset metadata to indicate\n # temporal resolution.\n if \"monthly\" in ds.lower():\n t_incr = 2592000 # 30 days\n else:\n t_incr = 86400 # 1 day\n ##################################################################\n t = self._endTime\n self._latRes = None\n self._lonRes = None\n while ntiles == 0:\n nexus_tiles = self._tile_service.get_tiles_bounded_by_box(self._minLat, self._maxLat, self._minLon, self._maxLon, ds=ds, start_time=t-t_incr, end_time=t)\n ntiles = len(nexus_tiles)\n self.log.debug('find_global_tile_set got {0} tiles'.format(ntiles))\n if ntiles > 0:\n for tile in nexus_tiles:\n self.log.debug('tile coords:')\n self.log.debug('tile lats: {0}'.format(tile.latitudes))\n self.log.debug('tile lons: {0}'.format(tile.longitudes))\n if self._latRes is None:\n lats = tile.latitudes.data\n if (len(lats) > 1):\n self._latRes = abs(lats[1]-lats[0])\n if self._lonRes is None:\n lons = tile.longitudes.data\n if (len(lons) > 1):\n self._lonRes = abs(lons[1]-lons[0])\n if ((self._latRes is not None) and \n (self._lonRes is not None)):\n break\n if (self._latRes is None) or (self._lonRes is None):\n ntiles = 0\n else:\n lats_agg = np.concatenate([tile.latitudes.compressed()\n for tile in nexus_tiles])\n lons_agg = np.concatenate([tile.longitudes.compressed()\n for tile in nexus_tiles])\n self._minLatCent = np.min(lats_agg)\n self._maxLatCent = np.max(lats_agg)\n self._minLonCent = np.min(lons_agg)\n self._maxLonCent = np.max(lons_agg)\n t -= t_incr\n return nexus_tiles\n\n def _find_tile_bounds(self, t):\n lats = t.latitudes\n lons = t.longitudes\n if (len(lats.compressed()) > 0) and (len(lons.compressed()) > 0):\n min_lat = np.ma.min(lats)\n max_lat = np.ma.max(lats)\n min_lon = np.ma.min(lons)\n max_lon = np.ma.max(lons)\n good_inds_lat = np.where(lats.mask == False)[0]\n good_inds_lon = np.where(lons.mask == False)[0]\n min_y = np.min(good_inds_lat)\n max_y = np.max(good_inds_lat)\n min_x = np.min(good_inds_lon)\n max_x = np.max(good_inds_lon)\n bounds = (min_lat, max_lat, min_lon, max_lon,\n min_y, max_y, min_x, max_x)\n else:\n self.log.warn('Nothing in this tile!')\n bounds 
= None\n return bounds\n \n @staticmethod\n def query_by_parts(tile_service, min_lat, max_lat, min_lon, max_lon, \n dataset, start_time, end_time, part_dim=0):\n nexus_max_tiles_per_query = 100\n #print 'trying query: ',min_lat, max_lat, min_lon, max_lon, \\\n # dataset, start_time, end_time\n try:\n tiles = \\\n tile_service.find_tiles_in_box(min_lat, max_lat, \n min_lon, max_lon, \n dataset, \n start_time=start_time, \n end_time=end_time,\n fetch_data=False)\n assert(len(tiles) <= nexus_max_tiles_per_query)\n except:\n #print 'failed query: ',min_lat, max_lat, min_lon, max_lon, \\\n # dataset, start_time, end_time\n if part_dim == 0: \n # Partition by latitude.\n mid_lat = (min_lat + max_lat) / 2\n nexus_tiles = SparkHandler.query_by_parts(tile_service, \n min_lat, mid_lat, \n min_lon, max_lon, \n dataset, \n start_time, end_time,\n part_dim=part_dim)\n nexus_tiles.extend(SparkHandler.query_by_parts(tile_service, \n mid_lat, \n max_lat, \n min_lon, \n max_lon, \n dataset, \n start_time, \n end_time,\n part_dim=part_dim))\n elif part_dim == 1: \n # Partition by longitude.\n mid_lon = (min_lon + max_lon) / 2\n nexus_tiles = SparkHandler.query_by_parts(tile_service, \n min_lat, max_lat, \n min_lon, mid_lon, \n dataset, \n start_time, end_time,\n part_dim=part_dim)\n nexus_tiles.extend(SparkHandler.query_by_parts(tile_service, \n min_lat, \n max_lat, \n mid_lon, \n max_lon, \n dataset, \n start_time, \n end_time,\n part_dim=part_dim))\n elif part_dim == 2:\n # Partition by time.\n mid_time = (start_time + end_time) / 2\n nexus_tiles = SparkHandler.query_by_parts(tile_service, \n min_lat, max_lat, \n min_lon, max_lon, \n dataset, \n start_time, mid_time,\n part_dim=part_dim)\n nexus_tiles.extend(SparkHandler.query_by_parts(tile_service, \n min_lat, \n max_lat, \n min_lon, \n max_lon, \n dataset, \n mid_time, \n end_time,\n part_dim=part_dim))\n else:\n # No exception, so query Cassandra for the tile data.\n #print 'Making NEXUS query to Cassandra for %d tiles...' 
% \\\n # len(tiles)\n #t1 = time.time()\n #print 'NEXUS call start at time %f' % t1\n #sys.stdout.flush()\n nexus_tiles = list(tile_service.fetch_data_for_tiles(*tiles))\n nexus_tiles = list(tile_service.mask_tiles_to_bbox(min_lat, max_lat,\n min_lon, max_lon,\n nexus_tiles))\n #t2 = time.time()\n #print 'NEXUS call end at time %f' % t2\n #print 'Seconds in NEXUS call: ', t2-t1\n #sys.stdout.flush()\n\n #print 'Returning %d tiles' % len(nexus_tiles)\n return nexus_tiles\n\n @staticmethod\n def _prune_tiles(nexus_tiles):\n del_ind = np.where([np.all(tile.data.mask) for tile in nexus_tiles])[0]\n for i in np.flipud(del_ind):\n del nexus_tiles[i]\n\n def _lat2ind(self,lat):\n return int((lat-self._minLatCent)/self._latRes)\n\n def _lon2ind(self,lon):\n return int((lon-self._minLonCent)/self._lonRes)\n\n def _ind2lat(self,y):\n return self._minLatCent+y*self._latRes\n\n def _ind2lon(self,x):\n return self._minLonCent+x*self._lonRes\n\n def _create_nc_file_time1d(self, a, fname, varname, varunits=None,\n fill=None):\n self.log.debug('a={0}'.format(a))\n self.log.debug('shape a = {0}'.format(a.shape))\n assert len(a.shape) == 1\n time_dim = len(a)\n rootgrp = Dataset(fname, \"w\", format=\"NETCDF4\")\n rootgrp.createDimension(\"time\", time_dim)\n vals = rootgrp.createVariable(varname, \"f4\", dimensions=(\"time\",),\n fill_value=fill)\n times = rootgrp.createVariable(\"time\", \"f4\", dimensions=(\"time\",))\n vals[:] = [d['mean'] for d in a]\n times[:] = [d['time'] for d in a]\n if varunits is not None:\n vals.units = varunits\n times.units = 'seconds since 1970-01-01 00:00:00'\n rootgrp.close()\n\n def _create_nc_file_latlon2d(self, a, fname, varname, varunits=None,\n fill=None):\n self.log.debug('a={0}'.format(a))\n self.log.debug('shape a = {0}'.format(a.shape))\n assert len(a.shape) == 2\n lat_dim, lon_dim = a.shape\n rootgrp = Dataset(fname, \"w\", format=\"NETCDF4\")\n rootgrp.createDimension(\"lat\", lat_dim)\n rootgrp.createDimension(\"lon\", lon_dim)\n vals = rootgrp.createVariable(varname, \"f4\",\n dimensions=(\"lat\",\"lon\",),\n fill_value=fill)\n lats = rootgrp.createVariable(\"lat\", \"f4\", dimensions=(\"lat\",))\n lons = rootgrp.createVariable(\"lon\", \"f4\", dimensions=(\"lon\",))\n vals[:,:] = a\n lats[:] = np.linspace(self._minLatCent, \n self._maxLatCent, lat_dim)\n lons[:] = np.linspace(self._minLonCent,\n self._maxLonCent, lon_dim)\n if varunits is not None:\n vals.units = varunits\n lats.units = \"degrees north\"\n lons.units = \"degrees east\"\n rootgrp.close()\n\n def _create_nc_file(self, a, fname, varname, **kwargs):\n self._create_nc_file_latlon2d(a, fname, varname, **kwargs)\n\n\ndef executeInitializers(config):\n [wrapper.init(config) for wrapper in AVAILABLE_INITIALIZERS]\n" ]
[ [ "numpy.shape", "scipy.signal.butter", "numpy.average", "scipy.signal.filtfilt" ], [ "numpy.linspace", "numpy.min", "numpy.ma.min", "numpy.flipud", "numpy.all", "numpy.max", "numpy.ma.max", "numpy.where" ] ]
aniket-gupta1/Reinforcement_Learning_Swarm
[ "d333606b8d275e41985ecc3015cd841b0e30fce5", "d333606b8d275e41985ecc3015cd841b0e30fce5" ]
[ "Environment_iteration9.py", "Environment_iteration6.py" ]
[ "import copy\nimport time\nimport numpy as np\nfrom utils import *\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point\nimport matplotlib.animation as animation\nfrom shapely.geometry.polygon import Polygon\n\n# Formation reward changed to a negative function based on distance from mean center\nclass Swarm(object):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__(self, v_max = 2, v_min = 0, safe_distance = 0.5, render_var=False):\n\t\tself.N, self.N_f, self.Weight_matrix, self.WP_list = Load_files()\n\n\t\tself.wp_rad = 0.5\n\t\tself.counter = 0\n\t\tself.render_var = render_var\n\t\tself.v_max = v_max\n\t\tself.v_min = v_min\n\t\tself.max_steps = 400\n\t\tself.wp_update_var = 0\n\t\tself.safe_distance = safe_distance\n\t\tself.timestep = 0.1\n\n\t\tself.done = False\n\n\t\tif self.render_var:\n\t\t\tself.show_plot()\n\n\t\t# Define reward constants\n\t\tself.goal_reward_const = -1\n\t\tself.formation_reward_const = -1\n\n\t\t# Define rewards\n\t\tself.goal_reward = 10\n\t\tself.formation_reward = 1\n\t\tself.collision_penalty = -100\n\n\t\tself.const = 15\n\t\tself.boundary_points = [(self.const,self.const),(-self.const,self.const),(-self.const,-self.const),(self.const,-self.const)]\n\t\tself.start_location = np.array([[i,np.random.randint(3)] for i in range(self.N)]).astype('float64')\n\t\t\n\t\t# Iterators for storing the position of agents\n\t\tself.pos = self.start_location\n\t\tself.pos_old = self.start_location\n\n\tdef show_plot(self):\n\t\tplt.show()\n\n\tdef get_distance(self, point1, point2):\n\t\treturn np.linalg.norm(point1-point2)\n\n\tdef restore_start_location(self):\n\t\t# Restore the original values of pos\n\t\tself.WP_list = list(np.random.permutation([[-8,9],[-8,-9],[8,-9],[8,9]]))\n\t\tself.pos = copy.copy(self.start_location)\n\t\tself.pos_old = copy.copy(self.start_location)\n\t\tself.wp_update_var = 0\n\n\tdef reset(self):\n\t\tself.restore_start_location()\n\n\t\tgoal_pos = self.get_current_waypoint()\n\t\tstate = list()\n\n\t\tfor pos1 in self.pos:\n\t\t\tstate.append(pos1)\n\n\t\tstate.append(goal_pos)\n\t\tstate = list(np.ndarray.flatten(np.array(state)))\n\t\t\n\t\treturn state\n\t\n\tdef render(self):\n\t\t# wap = self.get_current_waypoint()\n\t\t# x,y = [pos[0] for pos in self.pos]+[wap[0]], [pos[1] for pos in self.pos]+[wap[1]]\n\t\t# plt.clf()\n\t\t# plt.axis([-10, 10, -10, 10])\n\t\t# plt.scatter(x,y)\n\t\t# plt.pause(0.001)\n\t\twap = self.get_current_waypoint()\n\t\tx,y = [pos[0] for pos in self.pos], [pos[1] for pos in self.pos]\n\t\tplt.clf()\n\t\tplt.axis([-self.const, self.const, -self.const, self.const])\n\t\tplt.scatter(x,y)\n\t\tplt.scatter(wap[0], wap[1], color='red')\n\t\tplt.pause(0.001)\n\n\tdef action_sample(self):\n\t\treturn np.random.uniform(-self.v_max, self.v_max, (self.N*2))\t\t\n\n\tdef boundary_check(self):\n\t\tvar = False\n\n\t\tpoint = [Point(pos[0], pos[1]) for pos in self.pos]\n\t\tpolygon = Polygon(self.boundary_points)\n\n\t\tvar = np.mean([not polygon.contains(p) for p in point])\n\n\t\tif not var:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef get_current_waypoint(self):\n\t\treturn self.WP_list[self.wp_update_var]\n\n\tdef update_pos(self, v):\n\t\tself.pos_old = copy.copy(self.pos)\n\t\tself.pos += v*self.timestep\n\t\t#time.sleep(0.1)\n\n\tdef step(self, v):\n\t\tself.counter+=1\n\t\t#print(f\"Step: {self.counter}\")\n\t\tgoal_pos = self.get_current_waypoint()\t\t\t\t\t# Waypoint to be followed\n\t\tstate = list()\t\t\t\t\t\t\t\t\t\t\t# distance list (input to the actor network)\n\t\treward = 
0\t\t\t\t\t\t\t\t\t\t\t\t# intialize reward variable\n\t\tif self.done:\n\t\t\tself.done = False\n\t\t\tself.restore_start_location()\n\n\t\t# End episode if max number of steps are reached\n\t\tif self.counter%self.max_steps == 0:\n\t\t\tself.counter = 0\n\t\t\tself.done = True\n\n\t\t# Check if all agents are within environment boundaries\n\t\tvar = self.boundary_check()\n\t\tif var:\n\t\t\tself.done = True\n\t\t\tself.counter=0\n\n\t\t# Reshape and limit the velocity vector\n\t\tv = np.reshape(v, (self.N,2))\n\t\tv = rescale_vector(v, self.v_max, self.v_min)\n\n\t\t# Update vehicle position using current velocity\n\t\tself.update_pos(v)\n\n\t\ttemp_var=0\n\t\t# Find the value of next_state and reward\n\t\tfor (i, pos1), pos1_old in zip(enumerate(self.pos), self.pos_old):\n\t\t\t# Calculate formation reward\n\t\t\tfor (j, pos2), pos2_old in zip(enumerate(self.pos), self.pos_old):\n\t\t\t\tif i==j:\n\t\t\t\t\tcontinue\n\t\t\t\tdist = self.get_distance(pos1,pos2)\n\n\t\t\t\tif abs(dist-self.Weight_matrix[i][j])<0.1*self.Weight_matrix[i][j]:\n\t\t\t\t\treward += 0\n\t\t\t\telif dist<self.safe_distance:\n\t\t\t\t\treward += self.collision_penalty + (self.formation_reward_const/self.N) * abs(dist-self.Weight_matrix[i][j])\n\t\t\t\telse:\n\t\t\t\t\treward += (self.formation_reward_const/self.N) * abs(dist-self.Weight_matrix[i][j])\n\n\t\t\t# Goal Reward\n\t\t\tgoal_distance = self.get_distance(pos1, goal_pos)\n\t\t\t#goal_distance_old = self.get_distance(pos1_old, goal_pos)\n\t\t\tif abs(goal_distance-self.Weight_matrix[i][self.N])<=self.wp_rad:\n\t\t\t\ttemp_var+=1\n\t\t\telse:\n\t\t\t\treward += (self.goal_reward_const/self.N) * abs(goal_distance-self.Weight_matrix[i][self.N])\n\n\t\t\t# Updating the goal position\n\t\t\tif temp_var==self.N:\n\t\t\t\tprint(\"GOAL\")\n\t\t\t\tself.wp_update_var+=1\n\t\t\t\tif self.wp_update_var == len(self.WP_list):\n\t\t\t\t\tself.done = True\n\t\t\t\t\tprint(\"END GOAL\")\n\t\t\t\t\tself.wp_update_var-=1\n\n\t\t\tstate.append(pos1)\n\t\tstate.append(goal_pos)\n\t\tstate = list(np.ndarray.flatten(np.array(state)))\n\n\t\t#print(f\"Step: {self.counter}| Reward:{reward}\")\n\t\treturn state, reward, self.done, \"gibberish\"\n\n\tdef close(self):\n\t\tplt.close()\n\nif __name__ == '__main__':\n\ta = Swarm()\n\tplt.show()", "import copy\nimport time\nimport numpy as np\nfrom utils import *\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point\nimport matplotlib.animation as animation\nfrom shapely.geometry.polygon import Polygon\n\n# Formation reward changed to a negative function based on distance from mean center\nclass Swarm(object):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__(self, v_max = 2, v_min = 0, safe_distance = 0.5, render_var=False):\n\t\tself.N, self.N_f, self.Weight_matrix, self.WP_list = Load_files()\n\n\t\tself.wp_rad = 0.5\n\t\tself.counter = 0\n\t\tself.render_var = render_var\n\t\tself.v_max = v_max\n\t\tself.v_min = v_min\n\t\tself.max_steps = 400\n\t\tself.wp_update_var = 0\n\t\tself.safe_distance = safe_distance\n\t\tself.timestep = 0.1\n\n\t\tself.done = False\n\n\t\tif self.render_var:\n\t\t\tself.show_plot()\n\n\t\t# Define reward constants\n\t\tself.goal_reward_const = -1\n\t\tself.formation_reward_const = -1\n\n\t\t# Define rewards\n\t\tself.goal_reward = 10\n\t\tself.formation_reward = 1\n\t\tself.collision_penalty = -1\n\n\t\tself.goal_pos = self.WP_list[self.wp_update_var]\n\t\tself.boundary_points = [(20,20),(-20,20),(-20,-20),(20,-20)]\n\t\tself.start_location = np.array([[i,np.random.randint(3)] for i in 
range(self.N)]).astype('float64')\n\t\t\n\t\t# Iterators for storing the position of agents\n\t\tself.pos = self.start_location\n\t\tself.pos_old = self.start_location\n\n\tdef show_plot(self):\n\t\tplt.show()\n\n\tdef get_distance(self, point1, point2):\n\t\treturn np.linalg.norm(point1-point2)\n\n\tdef restore_start_location(self):\n\t\t# Restore the original values of pos\n\t\tself.WP_list = list(np.random.permutation([[-8,9],[-8,-9],[8,-9],[8,9]]))\n\t\tself.pos = copy.copy(self.start_location)\n\t\tself.pos_old = copy.copy(self.start_location)\n\t\tself.wp_update_var = 0\n\n\tdef reset(self):\n\t\tself.restore_start_location()\n\n\t\tgoal_pos = self.get_current_waypoint()\n\t\tstate = list()\n\n\t\tfor pos1 in self.pos:\n\t\t\tstate.append(pos1)\n\n\t\tstate.append(goal_pos)\n\t\tstate = list(np.ndarray.flatten(np.array(state)))\n\t\t\n\t\treturn state\n\t\n\tdef render(self):\n\t\t# wap = self.get_current_waypoint()\n\t\t# x,y = [pos[0] for pos in self.pos]+[wap[0]], [pos[1] for pos in self.pos]+[wap[1]]\n\t\t# plt.clf()\n\t\t# plt.axis([-10, 10, -10, 10])\n\t\t# plt.scatter(x,y)\n\t\t# plt.pause(0.001)\n\t\twap = self.get_current_waypoint()\n\t\tx,y = [pos[0] for pos in self.pos], [pos[1] for pos in self.pos]\n\t\tplt.clf()\n\t\tplt.axis([-20, 20, -20, 20])\n\t\tplt.scatter(x,y)\n\t\tplt.scatter(wap[0], wap[1], color='red')\n\t\tplt.pause(0.001)\n\n\tdef action_sample(self):\n\t\treturn np.random.uniform(-self.v_max, self.v_max, (self.N*2))\t\t\n\n\tdef boundary_check(self):\n\t\tvar = False\n\n\t\tpoint = [Point(pos[0], pos[1]) for pos in self.pos]\n\t\tpolygon = Polygon(self.boundary_points)\n\n\t\tvar = np.mean([not polygon.contains(p) for p in point])\n\n\t\tif not var:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef update_waypoint(self):\n\t\tself.wp_update_var+=1\n\t\tif self.wp_update_var == len(self.WP_list)-1:\n\t\t\tself.done = True\n\t\t\tself.wp_update_var-=1\n\n\t\tself.goal_pos = self.WP_list[self.wp_update_var]\n\n\t\treturn self.goal_pos\n\n\tdef get_current_waypoint(self):\n\t\treturn self.WP_list[self.wp_update_var]\n\n\tdef update_pos(self, v):\n\t\tself.pos_old = copy.copy(self.pos)\n\t\tself.pos += v*self.timestep\n\t\t#time.sleep(0.1)\n\n\tdef step(self, v):\n\t\tself.counter+=1\n\t\t#print(f\"Step: {self.counter}\")\n\t\tgoal_pos = self.get_current_waypoint()\t\t\t\t\t# Waypoint to be followed\n\t\tstate = list()\t\t\t\t\t\t\t\t\t\t\t# distance list (input to the actor network)\n\t\treward = 0\t\t\t\t\t\t\t\t\t\t\t\t# intialize reward variable\n\t\tif self.done:\n\t\t\tself.done = False\n\t\t\tself.restore_start_location()\n\n\t\t# End episode if max number of steps are reached\n\t\tif self.counter%self.max_steps == 0:\n\t\t\tprint(\"Max steps \")\n\t\t\tself.counter = 0\n\t\t\tself.done = True\n\n\t\t# Check if all agents are within environment boundaries\n\t\tvar = self.boundary_check()\n\t\tif var:\n\t\t\tprint(\"Crossed boundaries\")\n\t\t\tself.done = True\n\t\t\tself.counter=0\n\n\t\t# Reshape and limit the velocity vector\n\t\tv = np.reshape(v, (self.N,2))\n\t\tv = rescale_vector(v, self.v_max, self.v_min)\n\n\t\t# Update vehicle position using current velocity\n\t\tself.update_pos(v)\n\n\t\t# Goal position reward\n\t\ttemp_var=0\n\t\tfor i, pos1 in enumerate(self.pos):\n\t\t\tgoal_distance = self.get_distance(pos1, goal_pos)\n\n\t\t\tif abs(goal_distance-self.Weight_matrix[i][self.N])<=self.wp_rad:\n\t\t\t\ttemp_var+=1\n\t\t\t\t#print(f\"Goal reached by agent: {i+1}\")\n\t\t\telse:\n\t\t\t\treward += (self.goal_reward_const/self.N) * 
abs(goal_distance-self.Weight_matrix[i][self.N])\n\t\t\t\n\t\t\tif temp_var==self.N:\n\t\t\t\treward += 10\n\t\t\t\tgoal_pos = self.update_waypoint()\n\t\t\t\tprint(\"GOAL\")\n\n\t\t# Find the value of next_state and reward\n\t\tfor i, pos1 in enumerate(self.pos):\n\n\t\t\tfor j, pos2 in enumerate(self.pos):\n\t\t\t\tif i==j:\n\t\t\t\t\tcontinue\n\n\t\t\t\tdist = self.get_distance(pos1,pos2)\n\n\t\t\t\tif dist<self.safe_distance:\n\t\t\t\t\t#print(\"COLLISION\")\n\t\t\t\t\treward += self.collision_penalty + (self.formation_reward_const/self.N) * abs(dist-self.Weight_matrix[i][j])\n\t\t\t\telse:\n\t\t\t\t\treward += (self.formation_reward_const/self.N) * abs(dist-self.Weight_matrix[i][j])\n\n\t\t\tstate.append(pos1)\n\t\tstate.append(goal_pos)\n\t\tstate = list(np.ndarray.flatten(np.array(state)))\n\n\t\t#print(f\"Step: {self.counter}| Reward:{reward}\")\n\t\treturn state, reward, self.done, \"gibberish\"\n\n\tdef close(self):\n\t\tplt.close()\n\nif __name__ == '__main__':\n\ta = Swarm()\n\tplt.show()" ]
[ [ "matplotlib.pyplot.scatter", "numpy.reshape", "numpy.linalg.norm", "matplotlib.pyplot.clf", "numpy.random.permutation", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.random.uniform", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "numpy.random.randint" ], [ "matplotlib.pyplot.scatter", "numpy.reshape", "numpy.linalg.norm", "matplotlib.pyplot.clf", "numpy.random.permutation", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.random.uniform", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "numpy.random.randint" ] ]
DarlanAjlune/Projeto-Controle-PI
[ "417e544b9f94cb4d4e5f672556fe25ae08e8d891" ]
[ "Projeto_com_interface/Utils/all.py" ]
[ "from Utils.AjustePI import PI\nfrom control.timeresp import step_info\nimport scipy.io \nfrom Utils.SintoniaRF import RF\nimport matplotlib.pyplot as plt\nfrom Utils.MinimosQuadrados import MQ\nfrom Utils.SintoniaLugarRaizes import LR\nimport numpy as np\n# from SaidaMalhaFechadaPI import SMF\nfrom control.matlab import tf, feedback, step\n\n# função que calcula tudo\ndef calcula_tudo(mpico,tacomoda,filepath):\n data = scipy.io.loadmat(filepath)\n # arquivo\n global tempo\n tempo = data['T'][0]\n global u\n u = data['x1'].reshape(-1)\n global y\n y = data['y1'].reshape(-1)\n sp = u[0]\n # Função de transferência mínimos quadrados\n global ft\n ft = MQ.calculo(y, u, tempo, False)\n\n # Máximo pico\n #------------\n mp = mpico # PADRÃO = 0.15 \n #------------\n \n # Tempo de Acomodação\n #------------\n ta = tacomoda # Pedir para entrar dps # PADRÃO = 75\n #------------\n\n # função de transferência\n mf = feedback(ft, 1)\n # array resposta MF\n respostaMF, _ = step(mf*u[0], tempo)\n \n # plot gráfico malha fechada\n if False:\n info = step_info(mf*u[0])\n plt.plot(tempo, respostaMF)\n plt.xlabel('Tempo(s)')\n plt.ylabel('Distância(cm)')\n plt.title('Reposta em Malha Fechada') \n plt.legend([f\"Rise Time:{info['RiseTime']:.2f}s\\nOvershoot:{info['Overshoot']:.2f}%\\nSettling Time:{info['SettlingTime']:.2f}s\\nPeak:{info['Peak']:.2f}cm\\nPeak Time:{info['PeakTime']:.2f}s\"])\n plt.grid()\n plt.show()\n\n # método sintonia LGR\n kpLR, kiLR, ftLR = LR.calculo(mp, ta, u[0], tempo, ft.num[0][0]/ft.den[0][0][1], ft.den[0][0]/ft.den[0][0][1], False)\n # método sintonia RF\n kpRF, kiRF, ftRF = RF.calculo(mp, ta, u[0], tempo, ft.num[0][0]/ft.den[0][0][1], ft.den[0][0]/ft.den[0][0][1], False)\n\n # array p/ plot\n respostaLR, _ = step(ftLR*u[0], tempo)\n respostaRF, _ = step(ftRF*u[0], tempo)\n \n # plot saída sintonizada SEM AJUSTE FINO\n if False:\n respostaLR, _ = step(ftLR*u[0], tempo)\n respostaRF, _ = step(ftRF*u[0], tempo)\n infoLR = step_info(ftLR)\n infoRF = step_info(ftRF)\n plt.plot(tempo, respostaLR, 'g', tempo, respostaRF, 'b')\n plt.xlabel('Tempo(s)')\n plt.ylabel('Distância(cm)')\n plt.legend([f\"Lugar Geométrico das Raízes\\nRise Time:{infoLR['RiseTime']:.2f}s\\nOvershoot:{infoLR['Overshoot']:.2f}%\\nSettling Time:{infoLR['SettlingTime']:.2f}s\\nPeak:{infoLR['Peak']:.2f}cm\\nPeak Time:{infoLR['PeakTime']:.2f}s\", f\"Respsota em Frequência\\nRise Time:{infoRF['RiseTime']:.2f}s\\nOvershoot:{infoRF['Overshoot']:.2f}%\\nSettling Time:{infoRF['SettlingTime']:.2f}s\\nPeak:{infoRF['Peak']:.2f}cm\\nPeak Time:{infoRF['PeakTime']:.2f}s\"])\n plt.title('Comparação dos Métodos de Sintonia em Malha Fechada')\n plt.grid()\n plt.show()\n \n #RESPOSTA AJUSTADA\n # kpLrAjust = kpLR+5\n # kiLrAjust = kiLR+5\n # kpRfAjust = kpRF+4\n # kiRfAjust = kiRF+4\n # u[0] == sp\n respostaSMFLR, infoLR = PI.mostraPI(ft.num[0][0], ft.den[0][0], u[0], tempo, kpLR, kiLR, 'Resposta Malha Fechada Com Controlador - Sintonia LGR', False)\n respostaSMFRF, infoRF = PI.mostraPI(ft.num[0][0], ft.den[0][0], u[0], tempo, kpRF, kiRF, 'Resposta Malha Fechada Com Controlador - Sintonia RF', False)\n\n #RESPOSTA MALHA ABERTA\n respostaMA, _ = step(ft*50, tempo)\n\n # plot todos os gráficos\n if False:\n y_k, _ = step(ft*50, tempo)\n plt.plot(tempo, y_k, tempo, respostaMF, tempo, respostaLR, tempo, respostaRF, tempo, respostaSMFLR, tempo, respostaSMFRF)\n plt.legend(['Malha Aberta', 'Malha Fechada', 'Malha Fechada - LR - PI', 'Malha Fechada - RF - PI', 'Malha Fechada - LR - PI Ajustado', 'Malha Fechada - RF - PI Ajustado'])\n 
plt.xlabel('Tempo(s)')\n plt.ylabel('Distância(cm)')\n plt.grid()\n plt.show()\n\n # metadados dos dois sistemas controlados por diferentes métodos\n metadataLR = []\n metadataLR.append(infoLR['RiseTime'])\n metadataLR.append(infoLR['Overshoot'])\n metadataLR.append(infoLR['SettlingTime'])\n metadataLR.append(infoLR['Peak'])\n metadataLR.append(infoLR['PeakTime'])\n\n metadataRF = []\n metadataRF.append(infoRF['RiseTime'])\n metadataRF.append(infoRF['Overshoot'])\n metadataRF.append(infoRF['SettlingTime'])\n metadataRF.append(infoRF['Peak'])\n metadataRF.append(infoRF['PeakTime'])\n\n return kpLR, kiLR, kpRF, kiRF, sp, tempo, respostaMF, respostaRF, respostaLR, respostaSMFLR, respostaSMFRF, respostaMA, metadataLR, metadataRF\n\n# função para ser usada nos plots em que só ajustamos sp ou u[0] e os K's\ndef calcula_posterior(sp,kpLrAjust,kiLrAjust,kpRfAjust,kiRfAjust):\n respostaSMFLR, infoLR = PI.mostraPI(ft.num[0][0], ft.den[0][0], u[0], tempo, kpLrAjust, kiLrAjust, 'Resposta Malha Fechada Com Controlador - Sintonia LGR', False)\n respostaSMFRF, infoRF = PI.mostraPI(ft.num[0][0], ft.den[0][0], u[0], tempo, kpRfAjust, kiRfAjust, 'Resposta Malha Fechada Com Controlador - Sintonia RF', False)\n\n # metadados dos dois sistemas controlados por diferentes métodos\n metadataLR = []\n metadataLR.append(infoLR['RiseTime'])\n metadataLR.append(infoLR['Overshoot'])\n metadataLR.append(infoLR['SettlingTime'])\n metadataLR.append(infoLR['Peak'])\n metadataLR.append(infoLR['PeakTime'])\n\n metadataRF = []\n metadataRF.append(infoRF['RiseTime'])\n metadataRF.append(infoRF['Overshoot'])\n metadataRF.append(infoRF['SettlingTime'])\n metadataRF.append(infoRF['Peak'])\n metadataRF.append(infoRF['PeakTime'])\n\n return tempo , respostaSMFLR, respostaSMFRF, metadataLR, metadataRF\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Niccolo-Marcucci/meep_objects
[ "8f4efe54eb206f536331864987624c98de374ef8" ]
[ "python_utils.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n% Copyright 2020 Niccolò Marcucci <[email protected]>\n%\n% Licensed under the Apache License, Version 2.0 (the \"License\");\n% you may not use this file except in compliance with the License.\n% You may obtain a copy of the License at\n%\n% http://www.apache.org/licenses/LICENSE-2.0\n%\n% Unless required by applicable law or agreed to in writing, software\n% distributed under the License is distributed on an \"AS IS\" BASIS,\n% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n% See the License for the specific language governing permissions and\n% limitations under the License.\n%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\"\"\"\n\nimport numpy as np\nfrom scipy.io import loadmat, savemat\nfrom math import pi, cos, sin, tan, atan, atan2, sqrt\nfrom matplotlib import pyplot as plt, cm, colors, widgets\n\ndef convert_seconds (elapsed):\n minutes = np.floor(elapsed/60)\n secs = elapsed-minutes*60\n secs = np.round(secs*100)/100\n\n hours = np.int_(np.floor(minutes/60))\n minutes = np.int_(minutes-hours*60)\n\n return f'{hours}h-{minutes}min-{secs}s'\n\ndef plot_data_section(data, x=None, y=None, z=None):\n\n # Create the figure and the line that we will manipulate\n fig = plt.figure()\n ax = plt.axes()\n plt.imshow(data[:,:,0], vmax=9.0, vmin=1.0)\n\n\n # adjust the main plot to make room for the sliders\n plt.subplots_adjust(left=0.2)\n axcolor = 'lightgoldenrodyellow'\n\n # Make a vertically oriented slider to control the amplitude\n z = plt.axes([0.1, 0.25, 0.0225, 0.63], facecolor=axcolor)\n z_slider = widgets.Slider(\n ax=z,\n label=\"Z\",\n valmin=0,\n valmax=np.shape(data)[2],\n valinit=0,\n valstep=np.arange(0, np.shape(data)[2], dtype=int),\n orientation=\"vertical\"\n )\n\n # Create a `matplotlib.widgets.Button` to reset the sliders to initial values.\n resetax = plt.axes([0.025, 0.025, 0.1, 0.04])\n # button = widgets.Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n layer = widgets.TextBox(resetax,'','0', color=axcolor, hovercolor='0.975')\n\n # The function to be called anytime a slider's value changes\n def update(val):\n ax.imshow(data[:, :, z_slider.val], vmax=3.0, vmin=1.0)\n fig.canvas.draw_idle()\n layer.set_val(f'{val}')\n\n # register the update function with each slider\n z_slider.on_changed(update)\n\n def set_slider(event):\n z_slider.val = int(layer.text)\n # button.on_clicked(reset)\n layer.on_submit(set_slider)\n\n plt.show()\n\ndef plot_image(x, y, image, n_grid = 11, **kwargs):\n # fig, ax = plt.subplots(1,1)\n\n image = plt.imshow(np.transpose(image), origin='lower', **kwargs)\n\n x_label_list = np.round(np.linspace(x.min(), x.max(), n_grid), 2)\n y_label_list = np.round(np.linspace(y.min(), y.max(), n_grid), 2)\n\n x_positions = np.linspace(0, x.size, n_grid)\n y_positions = np.linspace(0, y.size, n_grid)\n\n plt.xticks(x_positions, x_label_list)\n plt.yticks(y_positions, y_label_list)\n\n return image.get_figure()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "numpy.linspace", "numpy.transpose", "numpy.int_", "matplotlib.pyplot.axes", "numpy.round", "matplotlib.widgets.TextBox", "numpy.shape", "numpy.floor", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
megvii-model/RLNAS
[ "a7e2ef9debcd06a93b075181a027b806b737b106" ]
[ "darts_search_space/cifar10/rlnas/evolution_search/super_model.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom operations import *\r\nfrom torch.autograd import Variable\r\nfrom genotypes import PRIMITIVES\r\nfrom genotypes import Genotype\r\nimport math\r\nimport numpy as np\r\nfrom config import config\r\nimport copy\r\nfrom utils import check_cand\r\n\r\nclass MixedOp(nn.Module):\r\n\r\n def __init__(self, C, stride):\r\n super(MixedOp, self).__init__()\r\n self._ops = nn.ModuleList()\r\n for idx, primitive in enumerate(PRIMITIVES):\r\n op = OPS[primitive](C, stride, True)\r\n op.idx = idx\r\n if 'pool' in primitive:\r\n op = nn.Sequential(op, nn.BatchNorm2d(C, affine=True))\r\n self._ops.append(op)\r\n\r\n def forward(self, x, rng):\r\n return self._ops[rng](x)\r\n\r\n\r\nclass Cell(nn.Module):\r\n\r\n def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):\r\n super(Cell, self).__init__()\r\n if reduction_prev:\r\n self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=True)\r\n else:\r\n self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=True)\r\n self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=True)\r\n self._steps = steps\r\n self._multiplier = multiplier\r\n self._C = C\r\n self.out_C = self._multiplier * C\r\n self.reduction = reduction\r\n\r\n self._ops = nn.ModuleList()\r\n self._bns = nn.ModuleList()\r\n self.time_stamp = 1 \r\n\r\n for i in range(self._steps):\r\n for j in range(2+i):\r\n stride = 2 if reduction and j < 2 else 1\r\n op = MixedOp(C, stride)\r\n self._ops.append(op)\r\n\r\n def forward(self, s0, s1, rngs):\r\n s0 = self.preprocess0(s0)\r\n s1 = self.preprocess1(s1)\r\n states = [s0, s1]\r\n offset = 0\r\n for i in range(self._steps):\r\n s = sum(self._ops[offset+j](h, rngs[offset+j]) for j, h in enumerate(states))\r\n offset += len(states)\r\n states.append(s)\r\n return torch.cat(states[-self._multiplier:], dim=1)\r\n\r\nclass Network(nn.Module):\r\n def __init__(self, C=16, num_classes=10, layers=8, steps=4, multiplier=4, stem_multiplier=3):\r\n super(Network, self).__init__()\r\n self._C = C\r\n self._num_classes = num_classes\r\n self._layers = layers\r\n self._steps = steps\r\n self._multiplier = multiplier\r\n\r\n C_curr = stem_multiplier * C\r\n\r\n self.stem = nn.Sequential(\r\n nn.Conv2d(3, C_curr, 3, padding=1, bias=False),\r\n nn.BatchNorm2d(C_curr)\r\n )\r\n\r\n C_prev_prev, C_prev, C_curr = C_curr, C_curr, C\r\n\r\n self.cells = nn.ModuleList()\r\n reduction_prev = False\r\n\r\n for i in range(layers):\r\n if i in [layers // 3, 2 * layers // 3]:\r\n C_curr *= 2\r\n reduction = True\r\n else:\r\n reduction = False\r\n cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)\r\n reduction_prev = reduction\r\n self.cells += [cell]\r\n C_prev_prev, C_prev = C_prev, multiplier * C_curr\r\n\r\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\r\n self.classifier = nn.Linear(C_prev, num_classes)\r\n\r\n def forward(self, input, rng):\r\n s0 = s1 = self.stem(input)\r\n for i, cell in enumerate(self.cells):\r\n s0, s1 = s1, cell(s0, s1, rng)\r\n out = self.global_pooling(s1)\r\n logits = self.classifier(out.view(out.size(0),-1))\r\n return logits\r\n\r\nif __name__ == '__main__':\r\n from copy import deepcopy\r\n model = Network()\r\n operations = []\r\n for _ in range(config.edges):\r\n operations.append(list(range(config.op_num)))\r\n rng = [np.random.randint(len(config.blocks_keys)) for i in range(config.edges)]\r\n\r\n rngs = check_cand(rng, operations)\r\n x = torch.rand(4,3,32,32)\r\n logit = 
model(x, rngs)\r\n print('logit:{0}'.format(logit))" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.rand", "torch.nn.BatchNorm2d" ] ]
Arindam-1991/deep_reid
[ "ab68d95c2229ef5b832a6a6b614a9b91e4984bd5" ]
[ "torchreid/engine/engine.py" ]
[ "from __future__ import division, print_function, absolute_import\r\nimport time\r\nimport numpy as np\r\nimport os.path as osp\r\nimport datetime\r\nfrom collections import OrderedDict\r\nimport torch\r\nfrom torch.nn import functional as F\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nfrom torchreid import metrics\r\nfrom torchreid.utils import (\r\n MetricMeter, AverageMeter, re_ranking, open_all_layers, save_checkpoint,\r\n open_specified_layers, visualize_ranked_results\r\n)\r\nfrom torchreid.losses import DeepSupervision\r\n\r\n\r\nclass Engine(object):\r\n r\"\"\"A generic base Engine class for both image- and video-reid.\r\n\r\n Args:\r\n datamanager (DataManager): an instance of ``torchreid.data.ImageDataManager``\r\n or ``torchreid.data.VideoDataManager``.\r\n use_gpu (bool, optional): use gpu. Default is True.\r\n \"\"\"\r\n\r\n def __init__(self, datamanager, use_gpu=True):\r\n self.datamanager = datamanager\r\n self.train_loader = self.datamanager.train_loader\r\n self.test_loader = self.datamanager.test_loader\r\n self.use_gpu = (torch.cuda.is_available() and use_gpu)\r\n self.writer = None\r\n self.epoch = 0\r\n\r\n self.model = None\r\n self.optimizer = None\r\n self.scheduler = None\r\n\r\n self._models = OrderedDict()\r\n self._optims = OrderedDict()\r\n self._scheds = OrderedDict()\r\n\r\n def register_model(self, name='model', model=None, optim=None, sched=None):\r\n if self.__dict__.get('_models') is None:\r\n raise AttributeError(\r\n 'Cannot assign model before super().__init__() call'\r\n )\r\n\r\n if self.__dict__.get('_optims') is None:\r\n raise AttributeError(\r\n 'Cannot assign optim before super().__init__() call'\r\n )\r\n\r\n if self.__dict__.get('_scheds') is None:\r\n raise AttributeError(\r\n 'Cannot assign sched before super().__init__() call'\r\n )\r\n\r\n self._models[name] = model\r\n self._optims[name] = optim\r\n self._scheds[name] = sched\r\n\r\n def get_model_names(self, names=None):\r\n names_real = list(self._models.keys())\r\n if names is not None:\r\n if not isinstance(names, list):\r\n names = [names]\r\n for name in names:\r\n assert name in names_real\r\n return names\r\n else:\r\n return names_real\r\n\r\n def save_model(self, epoch, rank1, save_dir, is_best=False):\r\n names = self.get_model_names()\r\n\r\n for name in names:\r\n save_checkpoint(\r\n {\r\n 'state_dict': self._models[name].state_dict(),\r\n 'epoch': epoch + 1,\r\n 'rank1': rank1,\r\n 'optimizer': self._optims[name].state_dict(),\r\n 'scheduler': self._scheds[name].state_dict()\r\n },\r\n osp.join(save_dir, name),\r\n is_best=is_best\r\n )\r\n\r\n def set_model_mode(self, mode='train', names=None):\r\n assert mode in ['train', 'eval', 'test']\r\n names = self.get_model_names(names)\r\n\r\n for name in names:\r\n if mode == 'train':\r\n self._models[name].train()\r\n else:\r\n self._models[name].eval()\r\n\r\n def get_current_lr(self, names=None):\r\n names = self.get_model_names(names)\r\n name = names[0]\r\n return self._optims[name].param_groups[-1]['lr']\r\n\r\n def update_lr(self, names=None):\r\n names = self.get_model_names(names)\r\n\r\n for name in names:\r\n if self._scheds[name] is not None:\r\n self._scheds[name].step()\r\n\r\n def run(\r\n self,\r\n save_dir='log',\r\n max_epoch=0,\r\n start_epoch=0,\r\n print_freq=10,\r\n fixbase_epoch=0,\r\n open_layers=None,\r\n start_eval=0,\r\n eval_freq=-1,\r\n test_only=False,\r\n dist_metric='euclidean',\r\n normalize_feature=False,\r\n visrank=False,\r\n visrank_topk=10,\r\n use_metric_cuhk03=False,\r\n 
ranks=[1, 5, 10, 20],\r\n rerank=False\r\n ):\r\n r\"\"\"A unified pipeline for training and evaluating a model.\r\n\r\n Args:\r\n save_dir (str): directory to save model.\r\n max_epoch (int): maximum epoch.\r\n start_epoch (int, optional): starting epoch. Default is 0.\r\n print_freq (int, optional): print_frequency. Default is 10.\r\n fixbase_epoch (int, optional): number of epochs to train ``open_layers`` (new layers)\r\n while keeping base layers frozen. Default is 0. ``fixbase_epoch`` is counted\r\n in ``max_epoch``.\r\n open_layers (str or list, optional): layers (attribute names) open for training.\r\n start_eval (int, optional): from which epoch to start evaluation. Default is 0.\r\n eval_freq (int, optional): evaluation frequency. Default is -1 (meaning evaluation\r\n is only performed at the end of training).\r\n test_only (bool, optional): if True, only runs evaluation on test datasets.\r\n Default is False.\r\n dist_metric (str, optional): distance metric used to compute distance matrix\r\n between query and gallery. Default is \"euclidean\".\r\n normalize_feature (bool, optional): performs L2 normalization on feature vectors before\r\n computing feature distance. Default is False.\r\n visrank (bool, optional): visualizes ranked results. Default is False. It is recommended to\r\n enable ``visrank`` when ``test_only`` is True. The ranked images will be saved to\r\n \"save_dir/visrank_dataset\", e.g. \"save_dir/visrank_market1501\".\r\n visrank_topk (int, optional): top-k ranked images to be visualized. Default is 10.\r\n use_metric_cuhk03 (bool, optional): use single-gallery-shot setting for cuhk03.\r\n Default is False. This should be enabled when using cuhk03 classic split.\r\n ranks (list, optional): cmc ranks to be computed. Default is [1, 5, 10, 20].\r\n rerank (bool, optional): uses person re-ranking (by Zhong et al. CVPR'17).\r\n Default is False. 
This is only enabled when test_only=True.\r\n \"\"\"\r\n\r\n if visrank and not test_only:\r\n raise ValueError(\r\n 'visrank can be set to True only if test_only=True'\r\n )\r\n\r\n if test_only:\r\n self.test(\r\n dist_metric=dist_metric,\r\n normalize_feature=normalize_feature,\r\n visrank=visrank,\r\n visrank_topk=visrank_topk,\r\n save_dir=save_dir,\r\n use_metric_cuhk03=use_metric_cuhk03,\r\n ranks=ranks,\r\n rerank=rerank\r\n )\r\n return\r\n\r\n if self.writer is None:\r\n self.writer = SummaryWriter(log_dir=save_dir)\r\n\r\n time_start = time.time()\r\n self.start_epoch = start_epoch\r\n self.max_epoch = max_epoch\r\n print('=> Start training')\r\n\r\n for self.epoch in range(self.start_epoch, self.max_epoch):\r\n self.train(\r\n print_freq=print_freq,\r\n fixbase_epoch=fixbase_epoch,\r\n open_layers=open_layers\r\n )\r\n\r\n if (self.epoch + 1) >= start_eval \\\r\n and eval_freq > 0 \\\r\n and (self.epoch+1) % eval_freq == 0 \\\r\n and (self.epoch + 1) != self.max_epoch:\r\n rank1 = self.test(\r\n dist_metric=dist_metric,\r\n normalize_feature=normalize_feature,\r\n visrank=visrank,\r\n visrank_topk=visrank_topk,\r\n save_dir=save_dir,\r\n use_metric_cuhk03=use_metric_cuhk03,\r\n ranks=ranks\r\n )\r\n self.save_model(self.epoch, rank1, save_dir)\r\n\r\n if self.max_epoch > 0:\r\n print('=> Final test')\r\n rank1 = self.test(\r\n dist_metric=dist_metric,\r\n normalize_feature=normalize_feature,\r\n visrank=visrank,\r\n visrank_topk=visrank_topk,\r\n save_dir=save_dir,\r\n use_metric_cuhk03=use_metric_cuhk03,\r\n ranks=ranks\r\n )\r\n self.save_model(self.epoch, rank1, save_dir)\r\n\r\n elapsed = round(time.time() - time_start)\r\n elapsed = str(datetime.timedelta(seconds=elapsed))\r\n print('Elapsed {}'.format(elapsed))\r\n if self.writer is not None:\r\n self.writer.close()\r\n\r\n def train(self, print_freq=10, fixbase_epoch=0, open_layers=None):\r\n losses = MetricMeter()\r\n batch_time = AverageMeter()\r\n data_time = AverageMeter()\r\n\r\n self.set_model_mode('train')\r\n\r\n self.two_stepped_transfer_learning(\r\n self.epoch, fixbase_epoch, open_layers\r\n )\r\n\r\n self.num_batches = len(self.train_loader)\r\n end = time.time()\r\n for self.batch_idx, data in enumerate(self.train_loader):\r\n data_time.update(time.time() - end)\r\n loss_summary = self.forward_backward(data)\r\n batch_time.update(time.time() - end)\r\n losses.update(loss_summary)\r\n\r\n if (self.batch_idx + 1) % print_freq == 0:\r\n nb_this_epoch = self.num_batches - (self.batch_idx + 1)\r\n nb_future_epochs = (\r\n self.max_epoch - (self.epoch + 1)\r\n ) * self.num_batches\r\n eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)\r\n eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))\r\n print(\r\n 'epoch: [{0}/{1}][{2}/{3}]\\t'\r\n 'time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\r\n 'data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\r\n 'eta {eta}\\t'\r\n '{losses}\\t'\r\n 'lr {lr:.6f}'.format(\r\n self.epoch + 1,\r\n self.max_epoch,\r\n self.batch_idx + 1,\r\n self.num_batches,\r\n batch_time=batch_time,\r\n data_time=data_time,\r\n eta=eta_str,\r\n losses=losses,\r\n lr=self.get_current_lr()\r\n )\r\n )\r\n\r\n if self.writer is not None:\r\n n_iter = self.epoch * self.num_batches + self.batch_idx\r\n self.writer.add_scalar('Train/time', batch_time.avg, n_iter)\r\n self.writer.add_scalar('Train/data', data_time.avg, n_iter)\r\n for name, meter in losses.meters.items():\r\n self.writer.add_scalar('Train/' + name, meter.avg, n_iter)\r\n self.writer.add_scalar(\r\n 'Train/lr', 
self.get_current_lr(), n_iter\r\n )\r\n\r\n end = time.time()\r\n\r\n self.update_lr()\r\n\r\n def forward_backward(self, data):\r\n raise NotImplementedError\r\n\r\n def test(\r\n self,\r\n dist_metric='euclidean',\r\n normalize_feature=False,\r\n visrank=False,\r\n visrank_topk=10,\r\n save_dir='',\r\n use_metric_cuhk03=False,\r\n ranks=[1, 5, 10, 20],\r\n rerank=False\r\n ):\r\n r\"\"\"Tests model on target datasets.\r\n\r\n .. note::\r\n\r\n This function has been called in ``run()``.\r\n\r\n .. note::\r\n\r\n The test pipeline implemented in this function suits both image- and\r\n video-reid. In general, a subclass of Engine only needs to re-implement\r\n ``extract_features()`` and ``parse_data_for_eval()`` (most of the time),\r\n but not a must. Please refer to the source code for more details.\r\n \"\"\"\r\n self.set_model_mode('eval')\r\n targets = list(self.test_loader.keys())\r\n\r\n for name in targets:\r\n domain = 'source' if name in self.datamanager.sources else 'target'\r\n print('##### Evaluating {} ({}) #####'.format(name, domain))\r\n query_loader = self.test_loader[name]['query']\r\n gallery_loader = self.test_loader[name]['gallery']\r\n rank1, mAP = self._evaluate(\r\n dataset_name=name,\r\n query_loader=query_loader,\r\n gallery_loader=gallery_loader,\r\n dist_metric=dist_metric,\r\n normalize_feature=normalize_feature,\r\n visrank=visrank,\r\n visrank_topk=visrank_topk,\r\n save_dir=save_dir,\r\n use_metric_cuhk03=use_metric_cuhk03,\r\n ranks=ranks,\r\n rerank=rerank\r\n )\r\n\r\n if self.writer is not None:\r\n self.writer.add_scalar(f'Test/{name}/rank1', rank1, self.epoch)\r\n self.writer.add_scalar(f'Test/{name}/mAP', mAP, self.epoch)\r\n\r\n return rank1\r\n\r\n @torch.no_grad()\r\n def _evaluate(\r\n self,\r\n dataset_name='',\r\n query_loader=None,\r\n gallery_loader=None,\r\n dist_metric='euclidean',\r\n normalize_feature=False,\r\n visrank=False,\r\n visrank_topk=10,\r\n save_dir='',\r\n use_metric_cuhk03=False,\r\n ranks=[1, 5, 10, 20],\r\n rerank=False\r\n ):\r\n batch_time = AverageMeter()\r\n\r\n def _feature_extraction(data_loader):\r\n f_, pids_, camids_ = [], [], []\r\n for batch_idx, data in enumerate(data_loader):\r\n imgs, pids, camids = self.parse_data_for_eval(data)\r\n if self.use_gpu:\r\n imgs = imgs.cuda()\r\n end = time.time()\r\n features = self.extract_features(imgs)\r\n batch_time.update(time.time() - end)\r\n features = features.data.cpu()\r\n f_.append(features)\r\n pids_.extend(pids)\r\n camids_.extend(camids)\r\n f_ = torch.cat(f_, 0)\r\n pids_ = np.asarray(pids_)\r\n camids_ = np.asarray(camids_)\r\n return f_, pids_, camids_\r\n\r\n print('Extracting features from query set ...')\r\n qf, q_pids, q_camids = _feature_extraction(query_loader)\r\n print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))\r\n\r\n print('Extracting features from gallery set ...')\r\n gf, g_pids, g_camids = _feature_extraction(gallery_loader)\r\n print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))\r\n\r\n print('Speed: {:.4f} sec/batch'.format(batch_time.avg))\r\n\r\n if normalize_feature:\r\n print('Normalzing features with L2 norm ...')\r\n qf = F.normalize(qf, p=2, dim=1)\r\n gf = F.normalize(gf, p=2, dim=1)\r\n\r\n print(\r\n 'Computing distance matrix with metric={} ...'.format(dist_metric)\r\n )\r\n distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)\r\n distmat = distmat.numpy()\r\n\r\n if rerank:\r\n print('Applying person re-ranking ...')\r\n distmat_qq = metrics.compute_distance_matrix(qf, qf, 
dist_metric)\r\n distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)\r\n distmat = re_ranking(distmat, distmat_qq, distmat_gg)\r\n\r\n print('Computing CMC and mAP ...')\r\n cmc, mAP = metrics.evaluate_rank(\r\n distmat,\r\n q_pids,\r\n g_pids,\r\n q_camids,\r\n g_camids,\r\n use_metric_cuhk03=use_metric_cuhk03\r\n )\r\n\r\n print('** Results **')\r\n print('mAP: {:.1%}'.format(mAP))\r\n print('CMC curve')\r\n for r in ranks:\r\n print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))\r\n\r\n if visrank:\r\n visualize_ranked_results(\r\n distmat,\r\n self.datamanager.fetch_test_loaders(dataset_name),\r\n self.datamanager.data_type,\r\n width=self.datamanager.width,\r\n height=self.datamanager.height,\r\n save_dir=osp.join(save_dir, 'visrank_' + dataset_name),\r\n topk=visrank_topk\r\n )\r\n\r\n return cmc[0], mAP\r\n\r\n def compute_loss(self, criterion, outputs, targets):\r\n if isinstance(outputs, (tuple, list)):\r\n loss = DeepSupervision(criterion, outputs, targets)\r\n else:\r\n loss = criterion(outputs, targets)\r\n return loss\r\n\r\n def extract_features(self, input):\r\n return self.model(input)\r\n\r\n def parse_data_for_train(self, data):\r\n imgs = data['img']\r\n pids = data['pid']\r\n return imgs, pids\r\n \r\n def parse_data_for_train_DG(self, data):\r\n imgs = data['img']\r\n pids = data['pid']\r\n camids = data['camid']\r\n dsetids = data['dsetid']\r\n return imgs, pids, camids, dsetids\r\n\r\n def parse_data_for_eval(self, data):\r\n imgs = data['img']\r\n pids = data['pid']\r\n camids = data['camid']\r\n return imgs, pids, camids\r\n\r\n def two_stepped_transfer_learning(\r\n self, epoch, fixbase_epoch, open_layers, model=None\r\n ):\r\n \"\"\"Two-stepped transfer learning.\r\n\r\n The idea is to freeze base layers for a certain number of epochs\r\n and then open all layers for training.\r\n\r\n Reference: https://arxiv.org/abs/1611.05244\r\n \"\"\"\r\n model = self.model if model is None else model\r\n if model is None:\r\n return\r\n\r\n if (epoch + 1) <= fixbase_epoch and open_layers is not None:\r\n print(\r\n '* Only train {} (epoch: {}/{})'.format(\r\n open_layers, epoch + 1, fixbase_epoch\r\n )\r\n )\r\n open_specified_layers(model, open_layers)\r\n else:\r\n open_all_layers(model)\r\n" ]
[ [ "torch.nn.functional.normalize", "torch.cat", "numpy.asarray", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available" ] ]
ShunlongHu/wows_aim
[ "4d1da293b2f0eaf657a30a6de4663b90cd2c359d" ]
[ "digitStrip.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 7 22:54:01 2022\r\n\r\n@author: HU\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom os import walk\r\n\r\nsaveDir = './Digits/'\r\nwidth = 7\r\nheight = 11\r\nyLoc = 557\r\nxLocs = (877,884,895,902)\r\n\r\n\r\n\r\nfor (dirpath, dirnames, filenames) in walk('./ScreenShots'):\r\n pass\r\n \r\ndigit_count = 0\r\nfor fname in filenames:\r\n if(fname[-4:] == '.jpg'):\r\n arr = plt.imread('./ScreenShots/'+fname)\r\n for x in xLocs:\r\n digit = arr[yLoc:yLoc+height, x:x+width, :]\r\n plt.imsave(saveDir+str(digit_count)+'.bmp',digit)\r\n digit_count+=1\r\n" ]
[ [ "matplotlib.pyplot.imread" ] ]
Jie-Yuan/myrun
[ "de6f60c609f598636d782e61271ee90aa192e155" ]
[ "myrun/run.py" ]
[ "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\n__title__ = 'run'\n__author__ = 'JieYuan'\n__mtime__ = '2019/4/17'\n\"\"\"\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef main(n_iter):\n data = make_classification(10 ** 6, 10 ** 3)\n clf = RandomForestClassifier(n_jobs=-1, n_estimators=n_iter)\n clf.fit(*data)\n" ]
[ [ "sklearn.datasets.make_classification", "sklearn.ensemble.RandomForestClassifier" ] ]
evanfebrianto/night2day
[ "77ae4ecf15f0b147d23c7bd01a5f8cb75660fdb7" ]
[ "options/test_options.py" ]
[ "import os\nfrom util import util\nimport torch\nimport easydict\n\nclass BaseTestOptions():\n def __init__(self):\n self.parser = None\n self.initialized = False\n \n def initialize(self):\n self.parser = easydict.EasyDict({\n \"name\" : 'robotcar_2day',\n \"checkpoints_dir\" : './checkpoints',\n \"dataroot\" : './datasets/test_dataset/',\n \"n_domains\" : 2,\n \"max_dataset_size\" : float(\"inf\"),\n \"resize_or_crop\" : 'resize_and_crop',\n \"no_flip\" : False,\n \"loadSize\" : 512,\n \"fineSize\" : 256,\n \"batchSize\" : 1,\n \"input_nc\" : 3,\n \"output_nc\" : 3,\n \"ngf\" : 64,\n \"ndf\" : 64,\n \"netG_n_blocks\" : 9,\n \"netG_n_shared\" : 0,\n \"netD_n_layers\" : 4,\n \"norm\" : 'instance',\n \"use_dropout\" : False,\n \"gpu_ids\" : -1,\n \"nThreads\" : 1,\n \"display_id\" : 0,\n \"display_port\" : 8097,\n \"display_winsize\" : 256,\n \"display_single_pane_ncols\" : 0,\n \"results_dir\" : './results/',\n \"aspect_ratio\" : 1.0,\n \"which_epoch\" : 150,\n \"phase\" : 'test',\n \"how_many\" : 50,\n \"serial_test\" : True,\n \"autoencode\" : False,\n \"reconstruct\" : False,\n \"show_matrix\" : False\n })\n self.initialized = True\n \n def parse(self, args=None):\n if not self.initialized:\n self.initialize()\n self.opt = self.parser #.parse_args()\n self.opt.isTrain = self.isTrain # train or test\n\n if args is not None:\n for i in args:\n self.opt[i] = args[i]\n \n str_ids = [self.opt.gpu_ids] #.split(',')\n self.opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n self.opt.gpu_ids.append(id)\n\n # set gpu ids\n if len(self.opt.gpu_ids) > 0:\n torch.cuda.set_device(self.opt.gpu_ids[0])\n\n args = vars(self.opt)\n\n print('------------ Options -------------')\n for k, v in sorted(args.items()):\n print('%s: %s' % (str(k), str(v)))\n print('-------------- End ----------------')\n\n # save to the disk\n expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)\n util.mkdirs(expr_dir)\n file_name = os.path.join(expr_dir, 'opt.txt')\n with open(file_name, 'wt') as opt_file:\n opt_file.write('------------ Options -------------\\n')\n for k, v in sorted(args.items()):\n opt_file.write('%s: %s\\n' % (str(k), str(v)))\n opt_file.write('-------------- End ----------------\\n')\n return self.opt\n\nclass TestOptions(BaseTestOptions):\n def initialize(self):\n BaseTestOptions.initialize(self)\n self.isTrain = False\n\n\n# from .base_options import BaseOptions\n\n\n# class TestOptions(BaseOptions):\n# def initialize(self):\n# BaseOptions.initialize(self)\n# self.isTrain = False\n\n# self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')\n# self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')\n\n# self.parser.add_argument('--which_epoch', required=True, type=int, help='which epoch to load for inference?')\n# self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc (determines name of folder to load from)')\n\n# self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run (if serial_test not enabled)')\n# self.parser.add_argument('--serial_test', action='store_true', help='read each image once from folders in sequential order')\n\n# self.parser.add_argument('--autoencode', action='store_true', help='translate images back into its own domain')\n# self.parser.add_argument('--reconstruct', action='store_true', help='do reconstructions of images during testing')\n\n# 
self.parser.add_argument('--show_matrix', action='store_true', help='visualize images in a matrix format as well')\n" ]
[ [ "torch.cuda.set_device" ] ]
fred5577/VAE-IW
[ "f3fe7c8e8e6786517aea0cc5c78f978739243b35" ]
[ "util/plots_first.py" ]
[ "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotinpy as pnp\nfrom matplotlib.patches import Patch\nimport numpy as np\nimport numpy.ma as ma\n\nsns.set()\nsns.set_style(\"ticks\")\nsns.palplot(sns.color_palette(\"Paired\"))\ndata = pd.read_csv(\"barplotstuff.csv\") \ndata = data.sort_values(by=[\"Human\"]).set_index(\"Domain\")\n#fig = plt.figure(figsize=(10,15))\n\n\nf,(ax) = plt.subplots(1,1,sharey=True,figsize=(3,18))\n# f.subplots_adjust(left=0.2)\n#ax = fig.add_subplot(1,1,1)\nax.set_xlim(0,1100)\n#ax2.set_xlim(2000,13000)\n\n\n#ax.axvline(100,c='black')\nax.axhline(11.5,c='black')\n# ax2.axhline(11.5,c='black')\n\n\nax.set_facecolor('white')\n#ax2.set_facecolor('white')\n\nindices = np.arange(len(data))\n\nmask1 = ma.where(data.Human >= data.BPROST)\nmask2 = ma.where(data.Human <= data.BPROST)\nmaskNames1 = data.index[mask1].tolist()\nmaskNames2 = data.index[mask2].tolist()\n#print(data.index[mask1].tolist())\n#print(data.loc[maskNames,\"Human\"].tolist())\nax.barh(data.index,data.col, height=1, color='turquoise', alpha=1)\nax.barh(maskNames1,data.loc[maskNames1,\"Human\"].tolist(), height=0.90, color='turquoise', alpha=1, linewidth=0)\nax.barh(data.index,data.BPROST, height=0.90, color='gray', alpha=1, linewidth=0)\nax.barh(maskNames2,data.loc[maskNames2,\"Human\"].tolist(), height=0.90, color='turquoise', alpha=1, linewidth=0)\n# ax.legend(\n# [\n# Patch(facecolor=\"turquoise\"),\n# Patch(facecolor=\"gray\")\n# ], [\"DQN\", \"B-PROST\"],loc=4\n# )\n#p3 = plt.bar(data.BPROST[mask2], color='r', alpha=1, edgecolor='none',linewidth=0,width=0.5, log=False)\n\n#data.col.plot(kind='barh',width=1, ax=ax,alpha=0.4)\n#data.query(\"Human < BPROST\").Human.plot(kind='barh',width=1, color=\"turquoise\", ax=ax,alpha=0.4)\n#data.query(\"Human < BPROST\").BPROST.plot(kind='barh',width=1, color=\"gray\", ax=ax,alpha=0.6)\n#data.query(\"Human < BPROST\").BPROST.plot(kind='barh',width=1, x=[\"B-PROST\"], color=\"gray\", ax=ax,alpha=0.6)\n#data.query(\"Human > BPROST\").Human.plot(kind='barh', x=[\"VAE-IW\"],width=1, color=\"turquoise\", ax=ax,alpha=0.4)\n#data.col.plot(kind='barh',width=1, ax=ax,alpha=0.4)\n\n#plt.yticks(rotation=30)\nplt.xticks([0,100,200,400,600,800,1000])\n# plt.xticks([2500,7500,12500])\n\n#kwargs.update(transform=ax2.transAxes) # switch to the bottom axes\n#data.Human.plot(kind='barh',width=1, ax=ax2,alpha=0.5)\n#data.BPROST.plot(kind='barh',width=1, ax=ax2,alpha=0.5)\n\n#ax.set_xscale(\"log\")\n#ax2.set_xscale(\"log\")\n\n# ax.spines['bottom'].set_visible(True)\n# ax2.spines['left'].set_visible(False)\n# ax.yaxis.tick_left()\n# ax.tick_params(bottom=True) # don't put tick labels at the top\n# ax2.yaxis.tick_bottom()\n\n# Make the spacing between the two axes a bit smaller\n#plt.subplots_adjust(wspace=0.15)\nsns.despine()\n\nax.text(2500, 12-0.2, \"Above human level\", color='black')\nax.text(2500, 11-0.2, \"Below human level\", color='black')\n\nfor i, v in enumerate(data.Human):\n ax.text(10, i-0.25, str(round(v)) + \"%\", color='black')\n\n# d = .015 # how big to make the diagonal lines in axes coordinates\n# # arguments to pass plot, just so we don't keep repeating them\n# kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)\n# ax.plot((1-d,1+d),(-d,+d), **kwargs) # top-left diagonal\n# ax.plot((1-d,1+d),(1-d,1+d), **kwargs) # bottom-left diagonal\n\n# kwargs.update(transform=ax2.transAxes) # switch to the bottom axes\n# ax2.plot((-d,d),(-d,+d), **kwargs) # top-right diagonal\n# ax2.plot((-d,d),(1-d,1+d), **kwargs) # 
bottom-right diagonal\n\n\nf.savefig(\"first_new_1.png\")" ]
[ [ "numpy.ma.where", "matplotlib.pyplot.xticks", "pandas.read_csv", "matplotlib.pyplot.subplots" ] ]
ZzuGiser/yolov4-pytorch
[ "d0526ef75fbc3aab8bca946c090d3a16668bb385" ]
[ "road_train.py" ]
[ "#-------------------------------------#\n# 对数据集进行训练\n#-------------------------------------#\nimport os\nimport numpy as np\nimport pandas as pd\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom utils.dataloader import yolo_dataset_collate, YoloDataset\nfrom nets.yolo_training import YOLOLoss,Generator\nfrom nets.yolo4 import YoloBody\nfrom tqdm import tqdm\n\nDATA_LOSS = []\n\n#---------------------------------------------------#\n# 获得类和先验框\n#---------------------------------------------------#\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape([-1,3,2])[::-1,:,:]\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):\n total_loss = 0\n val_loss = 0\n start_time = time.time()\n with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\n for iteration, batch in enumerate(gen):\n if iteration >= epoch_size:\n break\n images, targets = batch[0], batch[1]\n with torch.no_grad():\n if cuda:\n images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()\n targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]\n else:\n images = Variable(torch.from_numpy(images).type(torch.FloatTensor))\n targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]\n optimizer.zero_grad()\n outputs = net(images)\n losses = []\n for i in range(3):\n loss_item = yolo_losses[i](outputs[i], targets)\n losses.append(loss_item[0])\n loss = sum(losses)\n loss.backward()\n optimizer.step()\n\n total_loss += loss\n waste_time = time.time() - start_time\n \n pbar.set_postfix(**{'total_loss': total_loss.item() / (iteration + 1), \n 'lr' : get_lr(optimizer),\n 'step/s' : waste_time})\n pbar.update(1)\n\n start_time = time.time()\n net.eval()\n print('Start Validation')\n with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\n for iteration, batch in enumerate(genval):\n if iteration >= epoch_size_val:\n break\n images_val, targets_val = batch[0], batch[1]\n\n with torch.no_grad():\n if cuda:\n images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()\n targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]\n else:\n images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))\n targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]\n optimizer.zero_grad()\n outputs = net(images_val)\n losses = []\n for i in range(3):\n loss_item = yolo_losses[i](outputs[i], targets_val)\n losses.append(loss_item[0])\n loss = sum(losses)\n val_loss += loss\n pbar.set_postfix(**{'total_loss': val_loss.item() / (iteration + 1)})\n pbar.update(1)\n net.train()\n print('Finish Validation')\n print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))\n print('Total Loss: %.4f || Val Loss: %.4f ' % 
(total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\n\n print('Saving state, iter:', str(epoch+1))\n torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\n DATA_LOSS.append([float('%.4f' % (total_loss / (epoch_size + 1))), float('%.4f' % (val_loss / (epoch_size + 1)))])\n\n#----------------------------------------------------#\n# 检测精度mAP和pr曲线计算参考视频\n# https://www.bilibili.com/video/BV1zE411u7Vw\n#----------------------------------------------------#\nif __name__ == \"__main__\":\n #-------------------------------#\n # 输入的shape大小\n # 显存比较小可以使用416x416\n # 显存比较大可以使用608x608\n #-------------------------------#\n input_shape = (416,416)\n #-------------------------------#\n # tricks的使用设置\n #-------------------------------#\n Cosine_lr = False\n mosaic = True\n # 用于设定是否使用cuda\n Cuda = True\n smoooth_label = 0\n #-------------------------------#\n # Dataloder的使用\n #-------------------------------#\n Use_Data_Loader = True\n\n # annotation_path = 'road_train.txt'\n annotation_path = '../faster-rcnn-pytorch/train_road_faster_rcnn/faster_rcnn_road_sample.txt'\n\n #-------------------------------#\n # 获得先验框和类\n #-------------------------------#\n anchors_path = 'model_data/yolo_anchors.txt'\n classes_path = 'model_data/road_voc_classes.txt'\n class_names = get_classes(classes_path)\n anchors = get_anchors(anchors_path)\n num_classes = len(class_names)\n \n # 创建模型\n model = YoloBody(len(anchors[0]),num_classes)\n #-------------------------------------------#\n # 权值文件的下载请看README\n #-------------------------------------------#\n model_path = \"model_data/yolo4_weights.pth\"\n # 加快模型训练的效率\n print('Loading weights into state dict...')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model_dict = model.state_dict()\n pretrained_dict = torch.load(model_path, map_location=device)\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n print('Finished!')\n\n net = model.train()\n\n if Cuda:\n net = torch.nn.DataParallel(model)\n cudnn.benchmark = True\n net = net.cuda()\n\n # 建立loss函数\n yolo_losses = []\n for i in range(3):\n yolo_losses.append(YOLOLoss(np.reshape(anchors,[-1,2]),num_classes, \\\n (input_shape[1], input_shape[0]), smoooth_label, Cuda))\n\n # 0.1用于验证,0.9用于训练\n val_split = 0.1\n with open(annotation_path) as f:\n lines = f.readlines()\n np.random.seed(10101)\n np.random.shuffle(lines)\n np.random.seed(None)\n num_val = int(len(lines)*val_split)\n num_train = len(lines) - num_val\n \n #------------------------------------------------------#\n # 主干特征提取网络特征通用,冻结训练可以加快训练速度\n # 也可以在训练初期防止权值被破坏。\n # Init_Epoch为起始世代\n # Freeze_Epoch为冻结训练的世代\n # Epoch总训练世代\n # 提示OOM或者显存不足请调小Batch_size\n #------------------------------------------------------#\n if True:\n lr = 1e-3\n Batch_size = 4\n Init_Epoch = 0\n Freeze_Epoch = 50\n \n optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)\n if Cosine_lr:\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)\n else:\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\n\n if Use_Data_Loader:\n train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic)\n val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False)\n gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, 
num_workers=4, pin_memory=True,\n drop_last=True, collate_fn=yolo_dataset_collate)\n gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True, \n drop_last=True, collate_fn=yolo_dataset_collate)\n else:\n gen = Generator(Batch_size, lines[:num_train],\n (input_shape[0], input_shape[1])).generate(mosaic = mosaic)\n gen_val = Generator(Batch_size, lines[num_train:],\n (input_shape[0], input_shape[1])).generate(mosaic = False)\n\n epoch_size = max(1, num_train//Batch_size)\n epoch_size_val = num_val//Batch_size\n #------------------------------------#\n # 冻结一定部分训练\n #------------------------------------#\n for param in model.backbone.parameters():\n param.requires_grad = False\n\n for epoch in range(Init_Epoch,Freeze_Epoch):\n fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda)\n lr_scheduler.step()\n\n if True:\n lr = 1e-4\n Batch_size = 2\n Freeze_Epoch = 50\n Unfreeze_Epoch = 100\n\n optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)\n if Cosine_lr:\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)\n else:\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\n\n if Use_Data_Loader:\n train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic)\n val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False)\n gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,\n drop_last=True, collate_fn=yolo_dataset_collate)\n gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,\n drop_last=True, collate_fn=yolo_dataset_collate)\n else:\n gen = Generator(Batch_size, lines[:num_train],\n (input_shape[0], input_shape[1])).generate(mosaic = mosaic)\n gen_val = Generator(Batch_size, lines[num_train:],\n (input_shape[0], input_shape[1])).generate(mosaic = False)\n\n epoch_size = max(1, num_train//Batch_size)\n epoch_size_val = num_val//Batch_size\n #------------------------------------#\n # 解冻后训练\n #------------------------------------#\n for param in model.backbone.parameters():\n param.requires_grad = True\n\n for epoch in range(Freeze_Epoch,Unfreeze_Epoch):\n fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda)\n lr_scheduler.step()\n loss_data_frame = pd.DataFrame(DATA_LOSS, columns=['total_loss', 'val_loss'])\n loss_data_frame.to_csv('./logs/data_loss.csv')\n\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.optim.lr_scheduler.CosineAnnealingLR", "numpy.reshape", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.random.shuffle", "pandas.DataFrame", "torch.no_grad", "numpy.shape", "torch.cuda.is_available", "torch.nn.DataParallel", "numpy.array", "torch.optim.lr_scheduler.StepLR" ] ]
VictoriaNguyenMD/42-wildcard
[ "5a33dd8faeaaec5cd699d05ec87046bb67189ac5" ]
[ "tweet_analyzer.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom textblob import TextBlob\nimport re\n\nclass TweetAnalyzer():\n \"\"\"\n Functionality for analyzing and categorizing content from tweets.\n \"\"\"\n def clean_tweet(self, tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n \"\"\"\n Function to analyze the sentimental aspects of tweets\n \"\"\"\n def analyze_sentiment(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n \n if analysis.sentiment.polarity > 0:\n return 1\n elif analysis.sentiment.polarity == 0:\n return 0\n else:\n return -1\n\n \"\"\"\n Extracts text from each of the tweets and converts it into a dataframe\n param: JSON format\n \"\"\"\n def tweets_to_dataframe(self, tweets):\n df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=[\"Tweets\"])\n df[\"id\"] = np.array([tweet.id for tweet in tweets])\n df[\"len\"] = np.array([len(tweet.text) for tweet in tweets])\n df[\"date\"] = np.array([tweet.created_at for tweet in tweets])\n df[\"source\"] = np.array([tweet.source for tweet in tweets])\n df[\"likes\"] = np.array([tweet.favorite_count for tweet in tweets])\n df[\"retweets\"] = np.array([tweet.retweet_count for tweet in tweets])\n df[\"geo\"] = np.array([tweet.geo for tweet in tweets])\n return df\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
burhanmudassar/pytorch-action-detection
[ "16afb9312248d73c0e2be56ac733e0a33040307e" ]
[ "lib/utils/nms_utils.py" ]
[ "import numpy as np\nfrom .box_utils import nms2d\n\ndef classAgnosticNMS(dets, scores, overlapThresh=0.3, top_k=None):\n \"\"\"Compute the NMS for a set of scored tubelets\n scored tubelets are numpy array with 4K+1 columns, last one being the score\n return the indices of the tubelets to keep\n \"\"\"\n\n # If there are no detections, return an empty list\n if len(dets) == 0: return np.empty((0,), dtype=np.int32)\n if top_k is None: top_k = len(dets)\n\n pick = []\n\n # Coordinates of bounding boxes\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n\n # Compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n # area = (x2 - x1 + 1) * (y2 - y1 + 1)\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(scores)\n indices = np.empty(top_k, dtype=np.int32)\n counter = 0\n\n while I.size > 0:\n i = I[-1]\n indices[counter] = i\n counter += 1\n\n # Compute overlap\n xx1 = np.maximum(x1[i], x1[I[:-1]])\n yy1 = np.maximum(y1[i], y1[I[:-1]])\n xx2 = np.minimum(x2[i], x2[I[:-1]])\n yy2 = np.minimum(y2[i], y2[I[:-1]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n inter_area = w * h\n ious = inter_area/ (area[I[:-1]] + area[i] - inter_area)\n\n I = I[np.where(ious <= overlapThresh)[0]]\n\n if counter == top_k: break\n\n return indices[:counter]" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.argsort", "numpy.where", "numpy.empty" ] ]
TransitionProjects/ServicesConsistencyReport
[ "9c3b8b53abf391c482170649306875baf211b3bf" ]
[ "ConsistancyCheck.py" ]
[ "from tkinter.filedialog import askopenfilename\r\nfrom tkinter.filedialog import asksaveasfilename\r\n\r\nimport pandas as pd\r\n\r\n\r\nclass ConsistencyCheck:\r\n \"\"\"\r\n\r\n \"\"\"\r\n def __init__(self):\r\n pd.options.mode.chained_assignment = None\r\n self.raw_consistency_report = askopenfilename(title=\"Select the raw Services Consistency Report\")\r\n self.raw_data = pd.read_excel(\r\n self.raw_consistency_report,\r\n sheet_name=None,\r\n names=[\r\n \"Service Provider\",\r\n \"Staff Providing The Service\",\r\n \"Service Date\",\r\n \"CTID\",\r\n \"Service Type\",\r\n \"Provider Specific Code\"\r\n ]\r\n )\r\n self.staff_list = pd.read_excel(askopenfilename(title=\"Select the StaffList - All Report\"), sheet_name=\"All\")\r\n self.services = pd.concat(self.raw_data[key] for key in self.raw_data.keys())\r\n self.re_indexed = self.services.reset_index(drop=True)\r\n\r\n self.services_file = askopenfilename(title=\"Select the Services List\")\r\n self.raw_services = pd.read_excel(self.services_file).drop(\"Service You Performed\", axis=1)\r\n\r\n self.services = {}\r\n self.create_services_dict()\r\n self.drop_list = [\"Transitional Housing/Shelter\", \"Emergency Shelter\", \"Extreme Cold Weather Shelters\"]\r\n\r\n def highlight_null(self, data):\r\n \"\"\"\r\n Highlight all null elements in data-frame red.\r\n\r\n :param data: column data from the data-frame as sent by the apply method\r\n :return: color parameters to the style method\r\n \"\"\"\r\n blank = pd.isnull(data)\r\n return [\"background-color: red\" if value else \"\" for value in blank]\r\n\r\n def create_services_dict(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n for row in self.raw_services.index:\r\n key = self.raw_services.loc[row, \"Service Type\"]\r\n self.services[key] = []\r\n\r\n for row in self.raw_services.index:\r\n value = self.raw_services.loc[row, \"Service Provider Specific Code\"]\r\n key = self.raw_services.loc[row, \"Service Type\"]\r\n self.services[key].append(value)\r\n\r\n return 1\r\n\r\n def process(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n\r\n data = self.re_indexed[~(self.re_indexed[\"Service Type\"].isin(self.drop_list))].dropna(\r\n subset=[\"Staff Providing The Service\"]\r\n ).merge(\r\n self.staff_list, how=\"left\", left_on=\"Staff Providing The Service\", right_on=\"CM\"\r\n )\r\n data[\"Service Type Errors\"] = 0\r\n data[\"Provider Specific Service Errors\"] = 0\r\n data[\"Provider Error\"] = 0\r\n data[\"Service Date\"] = data[\"Service Date\"].dt.date\r\n\r\n # data.style.apply(self.highlight_null)\r\n for row in data.index:\r\n service_type = data.loc[row, \"Service Type\"]\r\n provider_specific = data.loc[row, \"Provider Specific Code\"]\r\n provider = data.loc[row, \"Service Provider\"]\r\n\r\n try:\r\n if not(service_type in self.services.keys()):\r\n data.loc[row, \"Service Type Errors\"] += 1\r\n else:\r\n pass\r\n finally:\r\n try:\r\n if not(provider_specific in self.services[service_type]):\r\n data.loc[row, \"Provider Specific Service Errors\"] += 1\r\n else:\r\n pass\r\n except:\r\n data.loc[row, \"Provider Specific Service Errors\"] += 0\r\n finally:\r\n try:\r\n if provider == \"Transition Projects (TPI) - Agency - SP(19)\":\r\n data.loc[row, \"Provider Error\"] += 1\r\n else:\r\n pass\r\n except:\r\n data.loc[row, \"Provider Error\"] += 0\r\n data[\"Total Errors\"] = data[\"Provider Error\"] + data[\"Provider Specific Service Errors\"] + data[\"Service Type Errors\"]\r\n self.save_values(data)\r\n\r\n def save_values(self, data_frame):\r\n 
\"\"\"\r\n\r\n :param data_frame:\r\n :return:\r\n \"\"\"\r\n writer = pd.ExcelWriter(asksaveasfilename(), engine=\"xlsxwriter\")\r\n for dept_name in list(set(data_frame[\"Dept\"].tolist())):\r\n dept_df = data_frame[(data_frame[\"Dept\"] == dept_name) & (data_frame[\"Total Errors\"] > 0)]\r\n dept_df.to_excel(writer, str(dept_name)[:5], engine=\"xlsxwriter\", index=False)\r\n data_frame.to_excel(writer, \"Processed Data\", index=False)\r\n self.re_indexed.to_excel(writer, \"Raw Data\", index=False)\r\n writer.save()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n a = ConsistencyCheck()\r\n a.process()" ]
[ [ "pandas.read_excel", "pandas.isnull" ] ]
jettom/numpy-unittest-100
[ "006226bf0be5e1aba56fe4da75be1e378391a1d4" ]
[ "test_array_slicing.py" ]
[ "import unittest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nclass TestArraySlicing(unittest.TestCase):\n\n def test_slicing_1d(self):\n vector = np.arange(10)\n assert_array_equal(vector[2:5], np.array([2, 3, 4]))\n\n def test_slicing_1d_with_step(self):\n vector = np.arange(10)\n assert_array_equal(vector[0:10:2], np.array([0, 2, 4, 6, 8]))\n\n def test_slicing_1d_reverse(self):\n vector = np.arange(10)\n assert_array_equal(vector[::-1], np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0]))\n\n def test_slicing_2d(self):\n metrix = np.arange(9).reshape(3, 3)\n assert_array_equal(metrix[0:2, 0:2], np.array([[0, 1], [3, 4]]))\n\nif __name__ == '__main__':\n unittest.main()" ]
[ [ "numpy.arange", "numpy.array" ] ]
usafchn/models
[ "480a33914d40822ce65df88f5b98d0e2372629f2" ]
[ "fluid/metric_learning/losses/metrics.py" ]
[ "import numpy as np\ndef recall_topk(fea, lab, k = 1):\n fea = np.array(fea)\n fea = fea.reshape(fea.shape[0], -1)\n n = np.sqrt(np.sum(fea**2, 1)).reshape(-1, 1)\n fea = fea/n\n a = np.sum(fea ** 2, 1).reshape(-1, 1)\n b = a.T\n ab = np.dot(fea, fea.T)\n d = a + b - 2*ab\n d = d + np.eye(len(fea)) * 1e8\n sorted_index = np.argsort(d, 1)\n res = 0\n for i in range(len(fea)):\n pred = lab[sorted_index[i][0]]\n if lab[i] == pred:\n res += 1.0\n res = res/len(fea)\n return res\n\nimport subprocess\nimport os\ndef get_gpu_num():\n visibledevice = os.getenv('CUDA_VISIBLE_DEVICES')\n if visibledevice:\n devicenum = len(visibledevice.split(','))\n else:\n devicenum = subprocess.check_output(['nvidia-smi', '-L']).count('\\n')\n return devicenum\n\nimport paddle as paddle\nimport paddle.fluid as fluid\n\ndef generate_index(batch_size, samples_each_class):\n a = np.arange(0, batch_size * batch_size)\n a = a.reshape(-1, batch_size)\n steps = batch_size // samples_each_class\n res = []\n for i in range(batch_size):\n step = i // samples_each_class\n start = step * samples_each_class\n end = (step + 1) * samples_each_class\n p = []\n n = []\n for j, k in enumerate(a[i]):\n if j >= start and j < end:\n if j == i:\n p.insert(0, k)\n else:\n p.append(k)\n else:\n n.append(k)\n comb = p + n\n res += comb\n res = np.array(res).astype(np.int32)\n return res\n\ndef calculate_order_dist_matrix(feature, batch_size, samples_each_class):\n assert(batch_size % samples_each_class == 0)\n feature = fluid.layers.reshape(feature, shape=[batch_size, -1])\n ab = fluid.layers.matmul(feature, feature, False, True)\n a2 = fluid.layers.square(feature)\n a2 = fluid.layers.reduce_sum(a2, dim = 1)\n d = fluid.layers.elementwise_add(-2*ab, a2, axis = 0)\n d = fluid.layers.elementwise_add(d, a2, axis = 1)\n d = fluid.layers.reshape(d, shape = [-1, 1])\n index = generate_index(batch_size, samples_each_class)\n index_var = fluid.layers.create_global_var(shape=[batch_size*batch_size], value=0, dtype='int32', persistable=True)\n index_var = fluid.layers.assign(index, index_var)\n d = fluid.layers.gather(d, index=index_var)\n d = fluid.layers.reshape(d, shape=[-1, batch_size])\n return d\n \n\n\n\n" ]
[ [ "numpy.dot", "numpy.arange", "numpy.argsort", "numpy.array", "numpy.sum" ] ]
keesh0410/SkillTree
[ "33478b328e501c5937bb16427266af62089c70d4" ]
[ "Machine_Learning_Code_Implementation-master/charpter15_random_forest/cart.py" ]
[ "import numpy as np\r\nfrom utils import feature_split, calculate_gini\r\n\r\n### 定义树结点\r\nclass TreeNode():\r\n def __init__(self, feature_i=None, threshold=None,\r\n leaf_value=None, left_branch=None, right_branch=None):\r\n # 特征索引\r\n self.feature_i = feature_i \r\n # 特征划分阈值\r\n self.threshold = threshold \r\n # 叶子节点取值\r\n self.leaf_value = leaf_value \r\n # 左子树\r\n self.left_branch = left_branch \r\n # 右子树\r\n self.right_branch = right_branch\r\n\r\n\t\t\r\n### 定义二叉决策树\r\nclass BinaryDecisionTree(object):\r\n ### 决策树初始参数\r\n def __init__(self, min_samples_split=2, min_gini_impurity=999,\r\n max_depth=float(\"inf\"), loss=None):\r\n # 根结点\r\n self.root = None \r\n # 节点最小分裂样本数\r\n self.min_samples_split = min_samples_split\r\n # 节点初始化基尼不纯度\r\n self.min_gini_impurity = min_gini_impurity\r\n # 树最大深度\r\n self.max_depth = max_depth\r\n # 基尼不纯度计算函数\r\n self.gini_impurity_calculation = None\r\n # 叶子节点值预测函数\r\n self._leaf_value_calculation = None\r\n # 损失函数\r\n self.loss = loss\r\n\r\n ### 决策树拟合函数\r\n def fit(self, X, y, loss=None):\r\n # 递归构建决策树\r\n self.root = self._build_tree(X, y)\r\n self.loss = None\r\n\r\n ### 决策树构建函数\r\n def _build_tree(self, X, y, current_depth=0):\r\n # 初始化最小基尼不纯度\r\n init_gini_impurity = 999\r\n # 初始化最佳特征索引和阈值\r\n best_criteria = None \r\n # 初始化数据子集\r\n best_sets = None \r\n \r\n if len(np.shape(y)) == 1:\r\n y = np.expand_dims(y, axis=1)\r\n\r\n # 合并输入和标签\r\n Xy = np.concatenate((X, y), axis=1)\r\n # 获取样本数和特征数\r\n n_samples, n_features = X.shape\r\n # 设定决策树构建条件\r\n # 训练样本数量大于节点最小分裂样本数且当前树深度小于最大深度\r\n if n_samples >= self.min_samples_split and current_depth <= self.max_depth:\r\n # 遍历计算每个特征的基尼不纯度\r\n for feature_i in range(n_features):\r\n # 获取第i特征的所有取值\r\n feature_values = np.expand_dims(X[:, feature_i], axis=1)\r\n # 获取第i个特征的唯一取值\r\n unique_values = np.unique(feature_values)\r\n\r\n # 遍历取值并寻找最佳特征分裂阈值\r\n for threshold in unique_values:\r\n # 特征节点二叉分裂\r\n Xy1, Xy2 = feature_split(Xy, feature_i, threshold)\r\n # 如果分裂后的子集大小都不为0\r\n if len(Xy1) > 0 and len(Xy2) > 0:\r\n # 获取两个子集的标签值\r\n y1 = Xy1[:, n_features:]\r\n y2 = Xy2[:, n_features:]\r\n\r\n # 计算基尼不纯度\r\n impurity = self.impurity_calculation(y, y1, y2)\r\n\r\n # 获取最小基尼不纯度\r\n # 最佳特征索引和分裂阈值\r\n if impurity < init_gini_impurity:\r\n init_gini_impurity = impurity\r\n best_criteria = {\"feature_i\": feature_i, \"threshold\": threshold}\r\n best_sets = {\r\n \"leftX\": Xy1[:, :n_features], \r\n \"lefty\": Xy1[:, n_features:], \r\n \"rightX\": Xy2[:, :n_features], \r\n \"righty\": Xy2[:, n_features:] \r\n }\r\n \r\n # 如果计算的最小不纯度小于设定的最小不纯度\r\n if init_gini_impurity < self.min_gini_impurity:\r\n # 分别构建左右子树\r\n left_branch = self._build_tree(best_sets[\"leftX\"], best_sets[\"lefty\"], current_depth + 1)\r\n right_branch = self._build_tree(best_sets[\"rightX\"], best_sets[\"righty\"], current_depth + 1)\r\n return TreeNode(feature_i=best_criteria[\"feature_i\"], threshold=best_criteria[\"threshold\"], left_branch=left_branch, right_branch=right_branch)\r\n\r\n # 计算叶子计算取值\r\n leaf_value = self._leaf_value_calculation(y)\r\n return TreeNode(leaf_value=leaf_value)\r\n\r\n ### 定义二叉树值预测函数\r\n def predict_value(self, x, tree=None):\r\n if tree is None:\r\n tree = self.root\r\n # 如果叶子节点已有值,则直接返回已有值\r\n if tree.leaf_value is not None:\r\n return tree.leaf_value\r\n # 选择特征并获取特征值\r\n feature_value = x[tree.feature_i]\r\n # 判断落入左子树还是右子树\r\n branch = tree.right_branch\r\n if isinstance(feature_value, int) or isinstance(feature_value, float):\r\n if feature_value >= tree.threshold:\r\n branch = tree.left_branch\r\n elif feature_value 
== tree.threshold:\r\n branch = tree.right_branch\r\n # 测试子集\r\n return self.predict_value(x, branch)\r\n\r\n ### 数据集预测函数\r\n def predict(self, X):\r\n y_pred = [self.predict_value(sample) for sample in X]\r\n return y_pred\r\n\r\n\t\t\t\t\r\nclass ClassificationTree(BinaryDecisionTree):\r\n ### 定义基尼不纯度计算过程\r\n def _calculate_gini_impurity(self, y, y1, y2):\r\n p = len(y1) / len(y)\r\n gini = calculate_gini(y)\r\n gini_impurity = p * calculate_gini(y1) + (1-p) * calculate_gini(y2)\r\n return gini_impurity\r\n \r\n ### 多数投票\r\n def _majority_vote(self, y):\r\n most_common = None\r\n max_count = 0\r\n for label in np.unique(y):\r\n # 统计多数\r\n count = len(y[y == label])\r\n if count > max_count:\r\n most_common = label\r\n max_count = count\r\n return most_common\r\n \r\n # 分类树拟合\r\n def fit(self, X, y):\r\n self.impurity_calculation = self._calculate_gini_impurity\r\n self._leaf_value_calculation = self._majority_vote\r\n super(ClassificationTree, self).fit(X, y)\r\n\r\n\t\t\r\n### CART回归树\r\nclass RegressionTree(BinaryDecisionTree):\r\n\t# 计算方差减少量\r\n def _calculate_variance_reduction(self, y, y1, y2):\r\n var_tot = np.var(y, axis=0)\r\n var_y1 = np.var(y1, axis=0)\r\n var_y2 = np.var(y2, axis=0)\r\n frac_1 = len(y1) / len(y)\r\n frac_2 = len(y2) / len(y)\r\n # 计算方差减少量\r\n variance_reduction = var_tot - (frac_1 * var_y1 + frac_2 * var_y2)\r\n return sum(variance_reduction)\r\n\r\n # 节点值取平均\r\n def _mean_of_y(self, y):\r\n value = np.mean(y, axis=0)\r\n return value if len(value) > 1 else value[0]\r\n\r\n\t# 回归树拟合\r\n def fit(self, X, y):\r\n self.impurity_calculation = self._calculate_variance_reduction\r\n self._leaf_value_calculation = self._mean_of_y\r\n super(RegressionTree, self).fit(X, y)\r\n" ]
[ [ "numpy.expand_dims", "numpy.unique", "numpy.concatenate", "numpy.mean", "numpy.shape", "numpy.var" ] ]
ellagale/testing_object_detectors_in_deepCNNs
[ "fbc15e0b8f7e023c1e3e139feec185b36f68e6f6" ]
[ "analysis.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nimport threading\nimport imp\n\n\"\"\"This module is for developing the machinery required to make neural nets and analyse local and global codes\n\nThis module does stuff.\n\"\"\"\n\n__version__ = '0.1'\n__author__ = 'Ella Gale'\n__date__ = 'Jan 2017'\n\n\nclass ThreadedRunner(object):\n \"\"\" run a task across multiple processors, taking care not to overload them \"\"\"\n\n def __init__(self, tasks, maxparallel=8):\n \"\"\"\n tasks: an array of tuples of the form (function,arguments) to call\n maxparallel: the maximum number of threads to be running at once\n \"\"\"\n self.threads = [threading.Thread(target=f, kwargs=k) for (f, k) in tasks]\n # TODO: spin up seperate thread managers to maximise throughput\n self.maxparallel = 8\n self.next_thread = 0\n\n def run(self, threadrunlimit=None):\n \"\"\"\n threadrunlimit: only run this many threads at most total,\n if None (default) then run all threads\n \"\"\"\n runcount = len(self.threads[self.next_thread:])\n if threadrunlimit is not None:\n runcount = min(runcount, threadrunlimit)\n\n next_thread = 0\n while runcount > 0:\n batch = self.threads[next_thread:next_thread + self.maxparallel]\n\n # cannot start threads while imp lock is held.\n toLock = imp.lock_held()\n if toLock:\n imp.release_lock()\n\n # Start all threads in this batch\n for thread in batch:\n thread.start()\n\n # Wait for them all to finish\n for thread in batch:\n thread.join\n\n # rest lock state\n if toLock:\n imp.acquire_lock()\n\n runcount = runcount - len(batch)\n next_thread = next_thread + len(batch)\n\n\ndef fk_plotter(dks, noOfK, lRange=None, error=0.15, xaxis=1, title=None, xlabel=None, ylabel=None, showPlots=1,\n savePlots=0):\n \"\"\"Produces F(k) plots for each layer of neurons\"\"\"\n \"lRange = range of layers to plot\"\n \"error = error below 1 which we consider significant\"\n \"xaxis = where to draw the xaxis line\"\n if lRange == None:\n lRange = range(len(dks))\n for l in lRange:\n # l is the number of layers -- send a smaller dks if you don't want them all!\n fig = plt.figure(l)\n x_data = np.array(range(noOfK)) + 1\n marker = itertools.cycle(['o', '>', '<', 'v', '8', 'd', 's', 'p', '*'])\n for n in range(len(dks[l])):\n # n is the number neurons in a layer\n y_data = dks[l][n].fs\n plt.plot(x_data, y_data, label=str(n), marker=marker.next(), alpha=1)\n if not xaxis == None:\n # note, if you don't want an xaxis, set xaxis='off'\n plt.axhline(xaxis)\n else:\n plt.axhline(0)\n plt.xlim([min(x_data) - 0.25, max(x_data) + 1])\n # plt.legend()\n plt.legend(bbox_to_anchor=(0.9, 1.1), loc='best', ncol=2, framealpha=0.5).draggable()\n # ax.legend().draggable()\n plt.plot([0., noOfK], [1 - error, 1 - error])\n if title == None:\n plt.title('Layer ' + str(l + 1))\n else:\n plt.title(title)\n if xlabel == None:\n plt.xlabel('K')\n else:\n plt.xlabel(xlabel)\n if ylabel == None:\n plt.ylabel('f(K)')\n else:\n plt.ylabel(ylabel)\n if showPlots == 1:\n plt.show()\n if savePlots == 1:\n fig.savefig('Fk' + str(l) + '.png', dpi=fig.dpi)\n\n\ndef jitterer(out, z):\n \"\"\"This function jitters the x axis\n 1: matrix of layer activations of the form:\n 2. 
which layer number to do\n outputs a transposed matrix of no of neurons rows and no of data columns\"\"\"\n Jx = np.ones(out[z].T.shape)\n\n for i in range(out[z].T.shape[0]):\n 'this is the number of neurons'\n for j in range(out[z].T.shape[1]):\n 'this is the number of data'\n Jx[i, j] = i + 1 + np.random.uniform(-0.25, 0.25)\n return Jx\n\n\ndef normalise_to_zero_one_interval(y, ymin, ymax):\n \"\"\"Because I always forget the formula\"\"\"\n if ymin > ymax: raise TypeError('min and max values the wrong way round!')\n return (y - ymin) / (ymax - ymin)\n\n\ndef plotter(x, y, labels=['x', 'y'], legend=None, linestyle=['o-', '+-', '*.-'], xaxis=None, showPlots=1, savePlots=0):\n \"\"\"Make nice plots automatically\"\"\"\n fig = plt.figure(1)\n xrange = max(x) - min(x)\n yrange = max(y.flatten()) - min(y.flatten())\n if not legend == None:\n for i in range(len(y)):\n plt.plot(x, y[i], linestyle[i / 3], label=legend[i])\n else:\n for i in range(len(y)):\n plt.plot(x, y[i], linestyle[i / 3])\n if not xaxis == None:\n # note, if you don't want an xaxis, set xaxis='off'\n plt.axhline(xaxis)\n else:\n plt.axhline(0)\n\n plt.axis([min(x.flatten()) - 0.1 * xrange, max(x.flatten()) + 0.1 * xrange,\n min(y.flatten()) - 0.1 * yrange, max(y.flatten()) + 0.1 * yrange])\n plt.ylabel(labels[1])\n plt.xlabel(labels[0])\n if not legend == None:\n plt.legend(framealpha=0.5)\n if showPlots == 1:\n plt.show()\n if savePlots == 1:\n fig.savefig('Hk' + str(x[0]) + '.png', dpi=fig.dpi)\n\n\n####################################################\n## downloaded code\n\"\"\"\nDemo of a function to create Hinton diagrams.\n\nHinton diagrams are useful for visualizing the values of a 2D array (e.g.\na weight matrix): Positive and negative values are represented by white and\nblack squares, respectively, and the size of each square represents the\nmagnitude of each value.\n\nInitial idea from David Warde-Farley on the SciPy Cookbook\n\"\"\"\n\n\ndef hinton(matrix, max_weight=None, ax=None):\n \"\"\"Draw Hinton diagram for visualizing a weight matrix.\"\"\"\n ax = ax if ax is not None else plt.gca()\n\n if not max_weight:\n max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))\n\n ax.patch.set_facecolor('gray')\n ax.set_aspect('equal', 'box')\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n\n for (x, y), w in np.ndenumerate(matrix):\n color = 'white' if w > 0 else 'black'\n size = np.sqrt(np.abs(w) / max_weight)\n rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n facecolor=color, edgecolor=color)\n ax.add_patch(rect)\n\n ax.autoscale_view()\n ax.invert_yaxis()\n\n if __name__ == '__main__':\n hinton(np.random.rand(20, 20) - 0.5)\n plt.show()\n\n## writing model to file and reading it back in test\n" ]
[ [ "matplotlib.pyplot.Rectangle", "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.axhline", "numpy.log", "numpy.abs", "matplotlib.pyplot.title", "numpy.ones", "matplotlib.pyplot.plot", "numpy.random.uniform", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.random.rand", "numpy.ndenumerate", "matplotlib.pyplot.show", "matplotlib.pyplot.NullLocator", "matplotlib.pyplot.figure" ] ]
AlchemistPrimus/data_crunchers_knbs
[ "b6d5a73bdbfed3f4a99e7047bd0747f3653a7fd2" ]
[ "data_cleaning.py" ]
[ "# Importing the necessary library\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\n\n# Shapefile function\n\ndef shapefile_func(data):\n \n shapefile_data = gpd.read_file(\"Data/County.shp\") # Shapefiles\n \n # Shapefile data preprocessing\n shapefile_data.replace(to_replace='Keiyo-Marakwet', value='Elgeyo-Marakwet', regex=True, inplace=True)\n \n shapefile_data['COUNTY'] = shapefile_data['COUNTY'].apply(lambda x: x.lower())\n shapefile_data.sort_values(by='COUNTY', ascending=True, inplace=True)\n shapefile_data.reset_index(drop = True, inplace = True)\n \n # Dataframe of shapefiles with columns of our interest\n shapefiles_df = shapefile_data[['COUNTY', 'OBJECTID', 'geometry']]\n shapefiles_df.columns = ['counties', 'objectid' ,'geometry'] # Renaming the columns\n shapefiles_df = shapefiles_df.assign(counties = data['counties'])\n \n # Merging the shapefile with the data to be mapped\n mapping_df = data.merge(shapefiles_df, on = 'counties', how = 'left')\n \n # Converting to Geodataframe\n mapping_df = gpd.GeoDataFrame(mapping_df)\n \n return (mapping_df)\n\n######################## \n# CORE HEALTHWORKFORCE #\n########################\n\n#Loading the datasets\n\n#List of health staff in 2013 from Africa open data\nHealth_staff_2013 = pd.read_csv(\"https://open.africa/dataset/4acc4709-cd40-43da-ad95-3b4a1224f97c/resource/ec2b86a8-9083-451d-b524-eba3f06382e7/download/cfafrica-_-data-team-_-outbreak-_-covid-19-_-data-_-openafrica-uploads-_-kenya-health-staff-per-.csv\")\n\n#List of health staff in 2019 from Africa open data\nHealth_staff_2019 = pd.read_csv(\"https://open.africa/dataset/4acc4709-cd40-43da-ad95-3b4a1224f97c/resource/43cea114-a929-4984-bd1c-b665d4a7ea5e/download/cfafrica-_-data-team-_-outbreak-_-covid19-_-data-_-openafrica-uploads-_-kenya-healthworkers.csv\")\n\n# Core healthworkforce function\n\ndef core_healthworkforce(Health_staff_2013, Health_staff_2019):\n \n # Renaming the columns\n Health_staff_2013.columns = ['counties', 'core_health_workforce_2013']\n Health_staff_2019.columns = ['counties', 'core_health_workforce_2019']\n \n # Changing all counties to lower case\n Health_staff_2013['counties'] = Health_staff_2013['counties'].apply(lambda x: x.lower())\n Health_staff_2019['counties'] = Health_staff_2019['counties'].apply(lambda x: x.lower()) \n \n # Sorting the counties alphabetically\n Health_staff_2013.sort_values(by='counties', ascending=True, inplace=True)\n Health_staff_2019.sort_values(by='counties', ascending=True, inplace=True)\n \n # Resetting the indeces\n Health_staff_2013.reset_index(drop=True, inplace=True)\n Health_staff_2019.reset_index(drop=True, inplace=True)\n \n # Removing the unwanted row in each dataset\n filt_2013 = (Health_staff_2013['counties'] == 'kenya')\n Health_staff_2013 = Health_staff_2013.loc[~filt_2013]\n Health_staff_2013.reset_index(drop=True, inplace=True)\n \n filt_2019 = (Health_staff_2019['counties'] == 'total')\n Health_staff_2019 = Health_staff_2019.loc[~filt_2019]\n Health_staff_2019.reset_index(drop=True, inplace=True)\n \n # Replacing 2013 Health workforce counties with 2019 counties list - This is to enable joining\n Health_staff_2013 = Health_staff_2013.assign(counties = Health_staff_2019['counties'])\n \n # Merging/Joining the two dataframes\n core_healthworkforce_df = Health_staff_2013.merge(Health_staff_2019, how = 'left', on = 'counties')\n \n # Finding the percentage change of core healthworkforce between 2013 and 2019\n core_healthworkforce_df['Percentage_change'] = 
round(np.divide(np.subtract(core_healthworkforce_df['core_health_workforce_2019'], \n core_healthworkforce_df['core_health_workforce_2013']),\n core_healthworkforce_df['core_health_workforce_2013'])*100, 2)\n \n core_healthworkforce_df.columns = ['counties', '2013', '2019', '% change']\n \n # Merging with shapefiles\n core_healthworkforce_shapefile = shapefile_func(core_healthworkforce_df)\n \n # Saving as Geojson\n core_healthworkforce_geojson = core_healthworkforce_shapefile.to_file(\"iaos_data\\core_healthworkforce.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n core_healthworkforce_EDA = core_healthworkforce_df.to_csv(\"iaos_data\\core_healthworkforce.csv\")\n \n return(core_healthworkforce_geojson, core_healthworkforce_EDA)\n\n\n#############################\n# HOSPITAL OPERATION STATUS #\n#############################\n\n#List of hospitals and beds from Africa Open data\nHospitals_data = pd.read_csv(\"https://open.africa/dataset/bb87c99a-78f8-4b8c-8186-4b0f4d935bcd/resource/6d13d7ff-ce54-4c8e-8879-da24fd3b456d/download/cfafrica-_-data-team-_-outbreak-_-covid19-_-data-_-openafrica-uploads-_-kenya-hospital-ke.csv\")\n\n# Chnaging the hospital owner type into either governmental or non-governmental\n\nHospitals_data['Owner type'] = Hospitals_data['Owner type'].map({'Private Practice' : 'non-govt',\n 'Non-Governmental Organizations' : 'non-govt',\n 'Faith Based Organization' : 'non-govt',\n 'Ministry of Health' : 'govt'})\n\n# Hospital operation status\n\ndef hospital_operation_status(Hospitals_data):\n \n #Sorting in Ascending order\n Hospitals_data.sort_values(by='County', ascending=True, inplace=True)\n \n # Changing all counties and Facility type, to lower case\n Hospitals_data['County'] = Hospitals_data['County'].apply(lambda x: x.lower())\n Hospitals_data['Facility type'] = Hospitals_data['Facility type'].apply(lambda x: x.lower())\n \n # Getting rid of hospitals that only offer specialised services\n filt = ((Hospitals_data['Facility type'] == 'dental clinic') |\n (Hospitals_data['Facility type'] == 'vct') |\n (Hospitals_data['Facility type'] == 'laboratory') |\n (Hospitals_data['Facility type'] == 'rehab. 
center - drug and substance abuse') |\n (Hospitals_data['Facility type'] == 'ophthalmology') | \n (Hospitals_data['Facility type'] == 'dialysis center') |\n (Hospitals_data['Facility type'] == 'radiology clinic') |\n (Hospitals_data['Facility type'] == 'blood bank') |\n (Hospitals_data['Facility type'] == 'regional blood transfusion centre') |\n (Hospitals_data['Facility type'] == 'pharmacy') |\n (Hospitals_data['Facility type'] == 'farewell home'))\n \n Hospitals_data = Hospitals_data.loc[~filt]\n \n # Creating a dataframe with our variables of interest\n Hospital_status_df = Hospitals_data[['County', 'Owner type', 'Open_whole_day', 'Open_public_holidays', \n 'Open_weekends', 'Open_late_night']]\n \n # Owner type can be: 'Ministry of Health', 'Private Practice', 'Non-Governmental Organizations', 'Faith Based Organization'\n\n owner_type = input(\"Enter the owner type (either: 'govt' or 'non-govt'): \")\n \n # Operating time can be: 'Open_whole_day', 'Open_public_holidays', 'Open_weekends', 'Open_late_night'\n \n operating_time = input(\"Enter the operating time (either: 'Open_whole_day', 'Open_public_holidays', 'Open_weekends', 'Open_late_night'): \")\n \n # Filtering by owner type\n owner_filter = (Hospital_status_df['Owner type'] == owner_type)\n Hospital_status_df = Hospital_status_df.loc[owner_filter]\n \n # Grouping operational status of hospitals per county\n Hospital_status_df = Hospital_status_df.groupby(['County', operating_time ], as_index = False).size().pivot('County', \n operating_time)\n # Removing the pivoting extra columns\n Hospital_status_df = Hospital_status_df.droplevel(0, axis = 1).reset_index().rename_axis(columns = None)\n # Replacing all NaN with zero\n Hospital_status_df.fillna(0, inplace = True)\n \n size = Hospital_status_df['size'] = Hospital_status_df.loc[:,'No':'Yes'].sum(axis = 1) \n \n Hospital_status_df = round(Hospital_status_df.loc[:,'No':'Yes'].div(Hospital_status_df['size'], axis = 0)*100, 2)\n \n #counties\n counties = Hospitals_data['County'].unique()\n Hospital_status_df.insert(loc = 0, column = 'counties', value = counties)\n \n # Total Hospitals\n Hospital_status_df.insert(loc = 1, column = 'Total Hospitals', value = size)\n \n # Merging with shapefiles\n hospital_status_shapefile = shapefile_func(Hospital_status_df)\n \n # Saving as Geojson\n hospital_status_geojson = hospital_status_shapefile.to_file(\"iaos_data\\hospital_status.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n hospital_status_EDA = Hospital_status_df.to_csv(\"iaos_data\\hospital_status.csv\")\n \n return (hospital_status_geojson, hospital_status_EDA)\n\n###############\n# CENSUS DATA #\n###############\n\n# Internet Coverage Function\n\ndef internet(Population_data, Household_data):\n \n # Sorting data into ascending order\n Population_data.sort_values(by = 'COUNTY', ascending = True, inplace = True)\n Household_data.sort_values(by = 'COUNTY', ascending = True, inplace = True)\n \n # Converting counties into lower case\n Population_data['COUNTY'] = Population_data['COUNTY'].apply(lambda x: x.lower())\n Household_data['COUNTY'] = Household_data['COUNTY'].apply(lambda x: x.lower())\n \n # Extracting counties\n counties = list(Population_data['COUNTY'].unique())\n \n # Part to be included on the website\n choose = input(\"Choose among the following (You can type either Internet_users, Internet_through_mobile, Fixed_internet_at_home): \")\n \n # Checking the internet users per county\n if choose == \"Internet_users\":\n Internet_users = 
Population_data.groupby(['COUNTY', 'P57'], as_index = False).size().pivot('COUNTY', 'P57')\n Internet_users = Internet_users.droplevel(0, axis=1).reset_index().rename_axis(columns = None)\n # Calculating population per county\n county_population = Population_data.groupby('COUNTY', as_index= False).size().sum(axis=1, numeric_only=True)\n # Adding the county population to the internet users dataframe\n Internet_users['county_population'] = county_population\n \n # Converting into percentages\n Internet_users = round(Internet_users.loc[:,'No':'Yes'].div(Internet_users['county_population'], axis=0) * 100, 2)\n \n # Adding counties column\n Internet_users.insert(loc = 0, column = 'counties', value = counties)\n \n Internet_users.insert(loc = 1, column = 'Population size', value = county_population)\n \n # Merging with shapefiles\n internet_users_shapefile = shapefile_func(Internet_users)\n \n # Saving as Geojson\n internet_users_geojson = internet_users_shapefile.to_file(\"iaos_data\\internet_users.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n internet_users_EDA = Internet_users.to_csv(\"iaos_data\\internet_users.csv\")\n \n return (internet_users_geojson, internet_users_EDA)\n \n # Households accessing the internet through Mobile\n elif choose == \"Internet_through_mobile\":\n Internet_through_mobile = Household_data.groupby(['COUNTY', 'H39_6'], as_index = False).size().pivot('COUNTY', 'H39_6')\n Internet_through_mobile= Internet_through_mobile.droplevel(0, axis=1).reset_index().rename_axis(columns = None)\n # Calculating number of households per county\n county_household_population = Household_data.groupby('COUNTY', as_index= False).size().sum(axis=1, numeric_only=True)\n \n Internet_through_mobile['county_household_population'] = county_household_population\n \n # Converting into percentages\n Internet_through_mobile = round(Internet_through_mobile.loc[:,'No':'Yes'].div(Internet_through_mobile['county_household_population'],\n axis = 0) * 100, 2)\n \n # Adding counties column\n Internet_through_mobile.insert(loc = 0, column = 'counties', value = counties)\n \n Internet_through_mobile.insert(loc = 1, column = 'Population size', value = county_household_population)\n \n # Merging with shapefiles\n internet_through_mobile_shapefile = shapefile_func(Internet_through_mobile)\n \n # Saving as Geojson\n internet_through_mobile_geojson = internet_through_mobile_shapefile.to_file(\"iaos_data\\internet_through_mobile.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n internet_through_mobile_EDA = Internet_through_mobile.to_csv(\"iaos_data\\internet_through_mobile.csv\")\n \n return (internet_through_mobile_geojson, internet_through_mobile_EDA)\n \n\n # Households access the internet through fixed internet e.g Fiber, Satellite, dish, LAN, Wi-Fi\n else:\n Fixed_internet_at_home = Household_data.groupby(['COUNTY', 'H39_7'], as_index = False).size().pivot('COUNTY', 'H39_7')\n Fixed_internet_at_home = Fixed_internet_at_home.droplevel(0, axis=1).reset_index().rename_axis(columns = None) \n # Calculating number of households per county\n county_household_population = Household_data.groupby('COUNTY', as_index= False).size().sum(axis=1, numeric_only=True)\n \n Fixed_internet_at_home['county_household_population'] = county_household_population\n \n Fixed_internet_at_home = round(Fixed_internet_at_home.loc[:,'No':'Yes'].div(Fixed_internet_at_home['county_household_population'],\n axis = 0) * 100, 2)\n \n # Adding counties column\n Fixed_internet_at_home.insert(loc = 0, column = 
'counties', value = counties)\n \n Fixed_internet_at_home.insert(loc = 1, column = 'Population Size', value = county_household_population)\n \n # Merging with shapefiles\n fixed_internet_at_home_shapefile = shapefile_func(Fixed_internet_at_home)\n \n # Saving as Geojson\n fixed_internet_at_home_geojson = fixed_internet_at_home_shapefile.to_file(\"iaos_data\\homes_fixed_with_internet.geojson\")\n \n \n # Saving the dataframe for the purpose of EDA\n fixed_internet_at_home_EDA = Fixed_internet_at_home.to_csv(\"iaos_data\\homes_fixed_with_internet.csv\")\n \n return (fixed_internet_at_home_geojson, fixed_internet_at_home_EDA) \n\n\n\n# Place of Birth function\n\ndef place_of_birth(Population_data):\n \n # Sorting data into ascending order\n Population_data.sort_values(by = 'COUNTY', ascending = True, inplace = True)\n \n # Converting counties into lower case\n Population_data['COUNTY'] = Population_data['COUNTY'].apply(lambda x: x.lower())\n\n #Checking the place of birth per county\n Place_of_birth = Population_data.groupby(['COUNTY', 'P36'], as_index = False).size().pivot('COUNTY', 'P36')\n Place_of_birth = Place_of_birth.droplevel(0, axis=1).reset_index().rename_axis(columns = None) \n total_births = Place_of_birth['size'] = Place_of_birth.loc[:,\"DK\":\"Non Health Facility\"].sum(axis=1)\n \n # Adding counties\n counties = list(Population_data['COUNTY'].unique())\n \n # Converting into percentages\n Place_of_birth = round(Place_of_birth.loc[:,'DK':'Non Healthy Facility'].div(Place_of_birth['size'], axis = 0) * 100, 2)\n \n # Adding the counties column\n Place_of_birth.insert(loc = 0, column = 'counties', value = counties)\n \n Place_of_birth.insert(loc = 1, column = 'Total births', value = total_births)\n \n # Merging with shapefiles\n place_of_birth_shapefile = shapefile_func(Place_of_birth)\n \n # Saving as Geojson\n place_of_birth_geojson = place_of_birth_shapefile.to_file(\"iaos_data\\place_of_birth.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n place_of_birth_EDA = Place_of_birth.to_csv(\"iaos_data\\place_of_birth.csv\")\n \n return (place_of_birth_geojson, place_of_birth_EDA) \n\n\n# Main Source of Drinking Water function\n\ndef source_of_drinking_water(Household_data):\n \n # Sorting data into ascending order\n Household_data.sort_values(by = 'COUNTY', ascending = True, inplace = True)\n \n # Converting counties into lower case\n Household_data['COUNTY'] = Household_data['COUNTY'].apply(lambda x: x.lower())\n \n # counties\n counties = list(Household_data['COUNTY'].unique())\n \n # Main source of drinking water for households\n Main_source_of_drinking_water = Household_data.groupby(['COUNTY', 'H33'], as_index = False).size().pivot('COUNTY', 'H33')\n Main_source_of_drinking_water = Main_source_of_drinking_water.droplevel(0, axis=1).reset_index().rename_axis(columns = None) #Removing the pivoting extra columns\n\n total_households = Main_source_of_drinking_water.loc[:,' Water Vendor':'Unprotected Well'].sum(axis = 1)\n \n # Improved drinking water sources\n improved_list = ['Borehole/Tube well', 'Bottled water','Piped to yard/plot', 'Piped into dwelling', 'Protected Spring', \n 'Protected Well', 'Public tap/Standpipe', 'Rain/Harvested water']\n improved_sources_df = Main_source_of_drinking_water.loc[:,improved_list]\n improved_sources_df['improved_size'] = improved_sources_df.sum(axis = 1)\n improved_sources_df.insert(loc = 0, column = 'counties', value = counties)\n improved_sources_df = improved_sources_df.loc[:,['counties','improved_size']]\n \n # Unimproved 
drinking water sources\n unimproved_list = [' Water Vendor', 'Dam', 'Lake', 'Pond', 'Stream/River', 'Unprotected Spring', 'Unprotected Well']\n unimproved_sources_df = Main_source_of_drinking_water.loc[:,unimproved_list]\n unimproved_sources_df['unimproved_size'] = unimproved_sources_df.sum(axis = 1)\n unimproved_sources_df.insert(loc = 0, column = 'counties', value = counties)\n unimproved_sources_df = unimproved_sources_df.loc[:,['counties','unimproved_size']]\n \n # Creating a data frame containing county, improved water sources, and unimproved water sources\n drinking_water_df = improved_sources_df.merge(unimproved_sources_df, how = 'left', on = 'counties')\n \n drinking_water_df['total_size'] = drinking_water_df.loc[:,'improved_size':'unimproved_size'].sum(axis = 1)\n drinking_water_df = round(drinking_water_df.loc[:,'improved_size':'unimproved_size'].div(drinking_water_df['total_size'],\n axis = 0)*100, 2)\n \n # Adding the counties column\n drinking_water_df.insert(loc = 0, column = 'counties', value = counties)\n \n # Renaming columns\n drinking_water_df.columns = ['counties', 'Safe', 'Unsafe']\n drinking_water_df.insert(loc = 1, column = 'Total Households', value = total_households)\n \n # Merging with shapefiles\n drinking_water_shapefile = shapefile_func(drinking_water_df)\n \n # Saving as Geojson\n drinking_water_geojson = drinking_water_shapefile.to_file(\"iaos_data\\main_source_of_drinking_water.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n drinking_water_EDA = drinking_water_df.to_csv(\"iaos_data\\main_source_of_drinking_water.csv\")\n \n return (drinking_water_geojson, drinking_water_EDA)\n\n\n# Main mode of human waste disposal\n\ndef mode_of_human_waste_disposal(Household_data):\n \n # Sorting data into ascending order\n Household_data.sort_values(by = 'COUNTY', ascending = True, inplace = True)\n \n # Converting counties into lower case\n Household_data['COUNTY'] = Household_data['COUNTY'].apply(lambda x: x.lower())\n \n # counties\n counties = list(Household_data['COUNTY'].unique())\n \n # Main mode of human waste disposal for households\n Main_mode_of_human_waste_disposal = Household_data.groupby(['COUNTY', 'H34'], as_index = False).size().pivot('COUNTY', 'H34')\n Main_mode_of_human_waste_disposal = Main_mode_of_human_waste_disposal.droplevel(0, axis=1).reset_index().rename_axis(columns = None) \n\n total_households = Main_mode_of_human_waste_disposal.loc[:,'Bio-septic tank/Biodigester':'VIP Pit Latrin'].sum(axis = 1)\n \n # Adequate human waste disposal methods\n adequate_list = ['Pit latrine covered', 'VIP Pit Latrin', 'Septic tank', 'Main Sewer', 'Bio-septic tank/Biodigester',\n 'Cess pool']\n adequate_methods_df = Main_mode_of_human_waste_disposal.loc[:,adequate_list]\n adequate_methods_df['adequate_size'] = adequate_methods_df.sum(axis = 1)\n adequate_methods_df.insert(loc = 0, column = 'counties', value = counties)\n adequate_methods_df = adequate_methods_df.loc[:,['counties','adequate_size']]\n \n # Inadequate human waste disposal methods\n inadequate_list = ['Bush', 'Pit Latrine uncovered', 'Bucket latrine']\n inadequate_methods_df = Main_mode_of_human_waste_disposal.loc[:,inadequate_list]\n inadequate_methods_df['inadequate_size'] = inadequate_methods_df.sum(axis = 1)\n inadequate_methods_df.insert(loc = 0, column = 'counties', value = counties)\n inadequate_methods_df = inadequate_methods_df.loc[:,['counties','inadequate_size']]\n \n # Creating a data frame containing county, adequate and inadequate human waste disposal methods\n 
human_waste_disposal_df = adequate_methods_df.merge(inadequate_methods_df, how = 'left', on = 'counties')\n \n human_waste_disposal_df['total_size'] = human_waste_disposal_df.loc[:,'adequate_size':'inadequate_size'].sum(axis = 1)\n human_waste_disposal_df = round(human_waste_disposal_df.loc[:,'adequate_size':'inadequate_size'].div(human_waste_disposal_df['total_size'],\n axis = 0) * 100, 2)\n \n # Adding the counties column\n human_waste_disposal_df.insert(loc = 0, column = 'counties', value = counties)\n \n # Renaming columns\n human_waste_disposal_df.columns = ['counties', 'Proper', 'Improper']\n \n human_waste_disposal_df.insert(loc = 1, column = 'Total Households', value = total_households)\n \n # Merging with shapefiles\n human_waste_disposal_shapefile = shapefile_func(human_waste_disposal_df)\n \n # Saving as Geojson\n human_waste_disposal_geojson = human_waste_disposal_shapefile.to_file(\"iaos_data\\human_waste_disposal.geojson\")\n \n # Saving the dataframe for the purpose of EDA\n human_waste_disposal_EDA = human_waste_disposal_df.to_csv(\"iaos_data\\human_waste_disposal.csv\")\n \n return (human_waste_disposal_geojson, human_waste_disposal_EDA)\n" ]
[ [ "pandas.read_csv", "numpy.subtract" ] ]
mrdvince/tensorflowdevcert
[ "6300521c738b0a646a47627e9e8d656ed072cad3" ]
[ "udacity/l02c01_celsius_to_fahrenheit.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport logging\n\nlogger = tf.get_logger()\nlogger.setLevel(logging.ERROR)\n\n# training data\ncelsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)\nfahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)\n\nfor i, c in enumerate(celsius_q):\n print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i]))\n\n# create model\nfc0 = tf.keras.layers.Dense(units=1, input_shape=[1])\nmodel = tf.keras.Sequential([fc0])\n# model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])\n\n# compile model\nmodel.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1))\n\n# train model\n\nhistory = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)\nprint(\"Finished training model\")\n\n# plot stats\n\nimport matplotlib.pyplot as plt\n\nplt.xlabel(\"Epoch Number\")\nplt.ylabel(\"Loss Magnitude\")\nplt.plot(history.history[\"loss\"])\n\n# use model to predict values\n\nprint(model.predict([100.0]))\n\n# layer weights\nprint(fc0.get_weights())\n\n# more layers\nfc0 = tf.keras.layers.Dense(units=4, input_shape=[1])\nfc1 = tf.keras.layers.Dense(units=4)\nfc2 = tf.keras.layers.Dense(units=1)\n\nmodel2 = tf.keras.Sequential([fc0, fc1, fc2])\nmodel2.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.Adam(0.1))\nmodel2.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)\nprint(\"Finished training the model\")\nprint(model.predict([100.0]))\nprint(\n \"Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit\".format(\n model.predict([100.0])\n )\n)\nprint(\"These are the l0 variables: {}\".format(fc0.get_weights()))\nprint(\"These are the l1 variables: {}\".format(fc1.get_weights()))\nprint(\"These are the l2 variables: {}\".format(fc2.get_weights()))\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.Sequential", "tensorflow.get_logger", "matplotlib.pyplot.plot", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
astromme/classify-handwritten-characters
[ "d55c572c8604b898deeb874c24d18af3e15807a0" ]
[ "libgnt/gnt.py" ]
[ "#!/usr/bin/env python3\n\nimport os\nimport numpy as np\n\nfrom .tagcode import tagcode_to_unicode\n\ndef samples_from_gnt(gnt_filepath):\n \"\"\"\n Given a gnt file path,\n returns generater that yields samples of (bitmap, character)\n \"\"\"\n header_size = 10\n\n with open(gnt_filepath, 'rb') as f:\n # read samples from f until no bytes remaining\n while True:\n header = np.fromfile(f, dtype='uint8', count=header_size)\n if not header.size: break\n\n sample_size = header[0] + (header[1]<<8) + (header[2]<<16) + (header[3]<<24)\n tagcode = header[5] + (header[4]<<8)\n width = header[6] + (header[7]<<8)\n height = header[8] + (header[9]<<8)\n assert header_size + width*height == sample_size\n\n bitmap = np.fromfile(f, dtype='uint8', count=width*height).reshape((height, width))\n yield bitmap, tagcode_to_unicode(tagcode)\n\ndef samples_from_directory(dirpath):\n \"\"\"\n Given a directory path,\n Returns generator that yields samples of (bitmap, character)\n From all .gnt files in that directory.\n \"\"\"\n\n for file_name in os.listdir(dirpath):\n if file_name.endswith('.gnt'):\n file_path = os.path.join(dirpath, file_name)\n for bitmap, character in samples_from_gnt(file_path):\n yield bitmap, character\n" ]
[ [ "numpy.fromfile" ] ]
ashwinipokle/deq
[ "955560601ac7b9dd3088e918850efd9ba14b7610" ]
[ "MDEQ-Vision/lib/models/mdeq_xt.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport os\nimport sys\nimport logging\nimport functools\nfrom termcolor import colored\n\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nsys.path.append(\"lib/models\")\nfrom mdeq_core_xt import MDEQDiffNet\n\nsys.path.append(\"../\")\nfrom lib.layer_utils import conv3x3\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\n\ndef nonlinearity(x):\n # swish\n return x*torch.sigmoid(x)\n\ndef get_timestep_embedding(timesteps, embedding_dim):\n \"\"\"\n This matches the implementation in Denoising Diffusion Probabilistic Models:\n From Fairseq.\n Build sinusoidal embeddings.\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n assert len(timesteps.shape) == 1\n\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)\n emb = emb.to(device=timesteps.device)\n emb = timesteps.float()[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))\n return emb\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n \"\"\"\n A bottleneck block with receptive field only 3x3. (This is not used in MDEQ; only\n in the classifier layer).\n \"\"\"\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM, affine=False)\n self.conv2 = conv3x3(planes, planes, stride=stride)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM, affine=False)\n self.conv3 = nn.Conv2d(planes, planes*self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes*self.expansion, momentum=BN_MOMENTUM, affine=False)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x, injection=None):\n if injection is None:\n injection = 0\n residual = x\n\n out = self.conv1(x) + injection\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n# Replace all batch norm with group norm?\nclass MDEQDiffusionNet(MDEQDiffNet):\n def __init__(self, cfg, **kwargs):\n \"\"\"\n Build an MDEQ Segmentation model with the given hyperparameters\n \"\"\"\n global BN_MOMENTUM\n\n self.ch = cfg.DIFFUSION_MODEL.CHANNELS\n\n super(MDEQDiffusionNet, self).__init__(cfg, BN_MOMENTUM=BN_MOMENTUM, **kwargs)\n self.head_channels = cfg['MODEL']['EXTRA']['FULL_STAGE']['HEAD_CHANNELS']\n self.final_chansize = cfg['MODEL']['EXTRA']['FULL_STAGE']['FINAL_CHANSIZE']\n self.out_chansize = cfg['DIFFUSION_MODEL']['OUT_CHANNELS']\n # timestep embedding\n self.temb = nn.Module()\n self.temb.dense = nn.ModuleList([\n torch.nn.Linear(cfg.DIFFUSION_MODEL.CHANNELS,\n cfg.DIFFUSION_MODEL.TEMB_CHANNELS),\n torch.nn.Linear(cfg.DIFFUSION_MODEL.TEMB_CHANNELS,\n cfg.DIFFUSION_MODEL.TEMB_CHANNELS),\n ])\n\n # Classification Head\n # self.incre_modules, self.downsamp_modules, 
self.final_layer = self._make_head(self.num_channels)\n\n # Last layer\n # self.last_layer = nn.Conv2d(self.final_chansize, self.out_chansize, kernel_size=3, \n # stride=1, padding=1)\n \n last_inp_channels = np.int(np.sum(self.num_channels))\n self.last_layer = nn.Sequential(nn.Conv2d(last_inp_channels, last_inp_channels//2, kernel_size=1),\n nn.BatchNorm2d(last_inp_channels//2, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True),\n nn.Conv2d(last_inp_channels//2, self.out_chansize, kernel_size=3, \n stride=1, padding=1))\n\n\n # def _make_head(self, pre_stage_channels):\n # \"\"\"\n # Create a final prediction head that:\n # - Increase the number of features in each resolution \n # - Downsample higher-resolution equilibria to the lowest-resolution and concatenate\n # - Pass through a final FC layer for classification\n # \"\"\"\n # head_block = Bottleneck\n # d_model = self.init_chansize\n # head_channels = self.head_channels\n \n # # Increasing the number of channels on each resolution when doing classification. \n # incre_modules = []\n # for i, channels in enumerate(pre_stage_channels):\n # incre_module = self._make_layer(head_block, channels, head_channels[i], blocks=1, stride=1)\n # incre_modules.append(incre_module)\n # incre_modules = nn.ModuleList(incre_modules)\n \n # Downsample the high-resolution streams to perform classification\n # downsamp_modules = []\n # for i in range(len(pre_stage_channels)-1):\n # in_channels = head_channels[i] * head_block.expansion\n # out_channels = head_channels[i+1] * head_block.expansion\n # downsamp_module = nn.Sequential(conv3x3(in_channels, out_channels, stride=2, bias=True),\n # nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM),\n # nn.ReLU(inplace=True))\n # downsamp_modules.append(downsamp_module)\n # downsamp_modules = nn.ModuleList(downsamp_modules)\n\n # # Final FC layers\n # final_layer = nn.Sequential(nn.Conv2d(head_channels[len(pre_stage_channels)-1] * head_block.expansion,\n # self.final_chansize, kernel_size=1),\n # nn.BatchNorm2d(self.final_chansize, momentum=BN_MOMENTUM),\n # nn.ReLU(inplace=True))\n # return incre_modules, downsamp_modules, final_layer\n\n # def _make_layer(self, block, inplanes, planes, blocks, stride=1, padding=0):\n # downsample = None\n # if stride != 1 or inplanes != planes * block.expansion:\n # downsample = nn.Sequential(nn.Conv2d(inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False, padding=padding),\n # nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM))\n\n # layers = []\n # layers.append(block(inplanes, planes, stride, downsample))\n # inplanes = planes * block.expansion\n # for i in range(1, blocks):\n # layers.append(block(inplanes, planes))\n\n # return nn.Sequential(*layers)\n\n\n # def predict_noise(self, y_list):\n # \"\"\"\n # Combine all resolutions and output noise\n # \"\"\"\n # import pdb; pdb.set_trace()\n # y = self.incre_modules[0](y_list[0])\n # for i in range(len(self.downsamp_modules)):\n # y = self.incre_modules[i+1](y_list[i+1]) + self.downsamp_modules[i](y)\n # y = torch.nn.functional.interpolate(\n # y, scale_factor=2.0, mode=\"nearest\")\n # y = self.final_layer(y)\n # y = self.last_layer(y)\n # return y\n\n def predict_noise(self, y_list):\n \"\"\"\n Combine all resolutions and output noise\n \"\"\"\n y0_h, y0_w = y_list[0].size(2), y_list[0].size(3)\n all_res = [y_list[0]]\n for i in range(1, self.num_branches):\n all_res.append(F.interpolate(y_list[i], size=(y0_h, y0_w), mode='bilinear', align_corners=True))\n\n y = torch.cat(all_res, dim=1)\n 
all_res = None\n y = self.last_layer(y)\n\n # y = self.final_layer(y)\n # y = self.last_layer(y)\n return y\n\n def forward(self, x, t, train_step=0, **kwargs):\n # timestep embedding\n temb = get_timestep_embedding(t, self.ch)\n temb = self.temb.dense[0](temb)\n temb = nonlinearity(temb)\n temb = self.temb.dense[1](temb)\n\n output, jac_loss, sradius = self._forward(x, temb, train_step, **kwargs)\n return self.predict_noise(output), jac_loss, sradius\n \n def init_weights(self, pretrained=''):\n \"\"\"\n Model initialization. If pretrained weights are specified, we load the weights.\n \"\"\"\n logger.info(f'=> init weights from normal distribution. PRETRAINED={pretrained}')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.01)\n if m.bias is not None:\n m.bias.data.normal_(0, 0.01)\n elif isinstance(m, nn.BatchNorm2d) and m.weight is not None:\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if os.path.isfile(pretrained):\n pretrained_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n model_dict = self.state_dict()\n \n # Just verification...\n diff_modules = set()\n for k in pretrained_dict.keys():\n if k not in model_dict.keys():\n diff_modules.add(k.split(\".\")[0])\n print(colored(f\"In ImageNet MDEQ but not Cityscapes MDEQ: {sorted(list(diff_modules))}\", \"red\"))\n diff_modules = set()\n for k in model_dict.keys():\n if k not in pretrained_dict.keys():\n diff_modules.add(k.split(\".\")[0])\n print(colored(f\"In Cityscapes MDEQ but not ImageNet MDEQ: {sorted(list(diff_modules))}\", \"green\"))\n \n pretrained_dict = {k: v for k, v in pretrained_dict.items()\n if k in model_dict.keys()}\n model_dict.update(pretrained_dict)\n self.load_state_dict(model_dict)\n\ndef get_diffusion_net(config, **kwargs):\n global BN_MOMENTUM\n BN_MOMENTUM = 0.01\n model = MDEQDiffusionNet(config, **kwargs)\n model.init_weights(config.MODEL.PRETRAINED)\n return model" ]
[ [ "torch.cos", "torch.sigmoid", "torch.cat", "torch.sin", "torch.load", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.arange", "torch.nn.Module", "torch.nn.Linear", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "numpy.sum", "torch.nn.functional.pad" ] ]
lilyminium/xarray
[ "4bad455a801e91b329794895afa0040c868ff128" ]
[ "xarray/backends/netCDF4_.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport functools\nimport operator\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport numpy as np\n\nfrom .. import Variable, coding\nfrom ..coding.variables import pop_to\nfrom ..core import indexing\nfrom ..core.pycompat import PY3, OrderedDict, basestring, iteritems, suppress\nfrom ..core.utils import FrozenOrderedDict, close_on_error, is_remote_uri\nfrom .common import (\n BackendArray, WritableCFDataStore, find_root, robust_getitem)\nfrom .locks import (NETCDFC_LOCK, HDF5_LOCK,\n combine_locks, ensure_lock, get_write_lock)\nfrom .file_manager import CachingFileManager, DummyFileManager\nfrom .netcdf3 import encode_nc3_attr_value, encode_nc3_variable\n\n# This lookup table maps from dtype.byteorder to a readable endian\n# string used by netCDF4.\n_endian_lookup = {'=': 'native',\n '>': 'big',\n '<': 'little',\n '|': 'native'}\n\n\nNETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK])\n\n\nclass BaseNetCDF4Array(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n if dtype is str:\n # use object dtype because that's the only way in numpy to\n # represent variable length strings; it also prevents automatic\n # string concatenation via conventions.decode_cf_variable\n dtype = np.dtype('O')\n self.dtype = dtype\n\n def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_array()\n data[key] = value\n if self.datastore.autoclose:\n self.datastore.close(needs_lock=False)\n\n def get_array(self):\n return self.datastore.ds.variables[self.variable_name]\n\n\nclass NetCDF4ArrayWrapper(BaseNetCDF4Array):\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER,\n self._getitem)\n\n def _getitem(self, key):\n if self.datastore.is_remote: # pragma: no cover\n getitem = functools.partial(robust_getitem, catch=RuntimeError)\n else:\n getitem = operator.getitem\n\n original_array = self.get_array()\n\n try:\n with self.datastore.lock:\n array = getitem(original_array, key)\n except IndexError:\n # Catch IndexError in netCDF4 and return a more informative\n # error message. This is most often called when an unsorted\n # indexer is used before the data is loaded from disk.\n msg = ('The indexing operation you are attempting to perform '\n 'is not valid on netCDF4.Variable object. Try loading '\n 'your data into memory first by calling .load().')\n if not PY3:\n import traceback\n msg += '\\n\\nOriginal traceback:\\n' + traceback.format_exc()\n raise IndexError(msg)\n return array\n\n\ndef _encode_nc4_variable(var):\n for coder in [coding.strings.EncodedStringCoder(allows_unicode=True),\n coding.strings.CharacterArrayCoder()]:\n var = coder.encode(var)\n return var\n\n\ndef _check_encoding_dtype_is_vlen_string(dtype):\n if dtype is not str:\n raise AssertionError( # pragma: no cover\n \"unexpected dtype encoding %r. 
This shouldn't happen: please \"\n \"file a bug report at github.com/pydata/xarray\" % dtype)\n\n\ndef _get_datatype(var, nc_format='NETCDF4', raise_on_invalid_encoding=False):\n if nc_format == 'NETCDF4':\n datatype = _nc4_dtype(var)\n else:\n if 'dtype' in var.encoding:\n encoded_dtype = var.encoding['dtype']\n _check_encoding_dtype_is_vlen_string(encoded_dtype)\n if raise_on_invalid_encoding:\n raise ValueError(\n 'encoding dtype=str for vlen strings is only supported '\n 'with format=\\'NETCDF4\\'.')\n datatype = var.dtype\n return datatype\n\n\ndef _nc4_dtype(var):\n if 'dtype' in var.encoding:\n dtype = var.encoding.pop('dtype')\n _check_encoding_dtype_is_vlen_string(dtype)\n elif coding.strings.is_unicode_dtype(var.dtype):\n dtype = str\n elif var.dtype.kind in ['i', 'u', 'f', 'c', 'S']:\n dtype = var.dtype\n else:\n raise ValueError('unsupported dtype for netCDF4 variable: {}'\n .format(var.dtype))\n return dtype\n\n\ndef _netcdf4_create_group(dataset, name):\n return dataset.createGroup(name)\n\n\ndef _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):\n if group in set([None, '', '/']):\n # use the root group\n return ds\n else:\n # make sure it's a string\n if not isinstance(group, basestring):\n raise ValueError('group must be a string or None')\n # support path-like syntax\n path = group.strip('/').split('/')\n for key in path:\n try:\n ds = ds.groups[key]\n except KeyError as e:\n if mode != 'r':\n ds = create_group(ds, key)\n else:\n # wrap error to provide slightly more helpful message\n raise IOError('group not found: %s' % key, e)\n return ds\n\n\ndef _ensure_fill_value_valid(data, attributes):\n # work around for netCDF4/scipy issue where _FillValue has the wrong type:\n # https://github.com/Unidata/netcdf4-python/issues/271\n if data.dtype.kind == 'S' and '_FillValue' in attributes:\n attributes['_FillValue'] = np.string_(attributes['_FillValue'])\n\n\ndef _force_native_endianness(var):\n # possible values for byteorder are:\n # = native\n # < little-endian\n # > big-endian\n # | not applicable\n # Below we check if the data type is not native or NA\n if var.dtype.byteorder not in ['=', '|']:\n # if endianness is specified explicitly, convert to the native type\n data = var.data.astype(var.dtype.newbyteorder('='))\n var = Variable(var.dims, data, var.attrs, var.encoding)\n # if endian exists, remove it from the encoding.\n var.encoding.pop('endian', None)\n # check to see if encoding has a value for endian its 'native'\n if not var.encoding.get('endian', 'native') is 'native':\n raise NotImplementedError(\"Attempt to write non-native endian type, \"\n \"this is not supported by the netCDF4 \"\n \"python library.\")\n return var\n\n\ndef _extract_nc4_variable_encoding(variable, raise_on_invalid=False,\n lsd_okay=True, h5py_okay=False,\n backend='netCDF4', unlimited_dims=None):\n if unlimited_dims is None:\n unlimited_dims = ()\n\n encoding = variable.encoding.copy()\n\n safe_to_drop = set(['source', 'original_shape'])\n valid_encodings = set(['zlib', 'complevel', 'fletcher32', 'contiguous',\n 'chunksizes', 'shuffle', '_FillValue', 'dtype'])\n if lsd_okay:\n valid_encodings.add('least_significant_digit')\n if h5py_okay:\n valid_encodings.add('compression')\n valid_encodings.add('compression_opts')\n\n if not raise_on_invalid and encoding.get('chunksizes') is not None:\n # It's possible to get encoded chunksizes larger than a dimension size\n # if the original file had an unlimited dimension. 
This is problematic\n # if the new file no longer has an unlimited dimension.\n chunksizes = encoding['chunksizes']\n chunks_too_big = any(\n c > d and dim not in unlimited_dims\n for c, d, dim in zip(chunksizes, variable.shape, variable.dims))\n changed_shape = encoding.get('original_shape') != variable.shape\n if chunks_too_big or changed_shape:\n del encoding['chunksizes']\n\n for k in safe_to_drop:\n if k in encoding:\n del encoding[k]\n\n if raise_on_invalid:\n invalid = [k for k in encoding if k not in valid_encodings]\n if invalid:\n raise ValueError('unexpected encoding parameters for %r backend: '\n ' %r' % (backend, invalid))\n else:\n for k in list(encoding):\n if k not in valid_encodings:\n del encoding[k]\n\n return encoding\n\n\nclass GroupWrapper(object):\n \"\"\"Wrap netCDF4.Group objects so closing them closes the root group.\"\"\"\n def __init__(self, value):\n self.value = value\n\n def close(self):\n # netCDF4 only allows closing the root group\n find_root(self.value).close()\n\n\ndef _open_netcdf4_group(filename, lock, mode, group=None, **kwargs):\n import netCDF4 as nc4\n\n ds = nc4.Dataset(filename, mode=mode, **kwargs)\n\n with close_on_error(ds):\n ds = _nc4_require_group(ds, group, mode)\n\n _disable_auto_decode_group(ds)\n\n return GroupWrapper(ds)\n\n\ndef _disable_auto_decode_variable(var):\n \"\"\"Disable automatic decoding on a netCDF4.Variable.\n\n We handle these types of decoding ourselves.\n \"\"\"\n var.set_auto_maskandscale(False)\n\n # only added in netCDF4-python v1.2.8\n with suppress(AttributeError):\n var.set_auto_chartostring(False)\n\n\ndef _disable_auto_decode_group(ds):\n \"\"\"Disable automatic decoding on all variables in a netCDF4.Group.\"\"\"\n for var in ds.variables.values():\n _disable_auto_decode_variable(var)\n\n\ndef _is_list_of_strings(value):\n if (np.asarray(value).dtype.kind in ['U', 'S'] and\n np.asarray(value).size > 1):\n return True\n else:\n return False\n\n\ndef _set_nc_attribute(obj, key, value):\n if _is_list_of_strings(value):\n # encode as NC_STRING if attr is list of strings\n try:\n obj.setncattr_string(key, value)\n except AttributeError:\n # Inform users with old netCDF that does not support\n # NC_STRING that we can't serialize lists of strings\n # as attrs\n msg = ('Attributes which are lists of strings are not '\n 'supported with this version of netCDF. Please '\n 'upgrade to netCDF4-python 1.2.4 or greater.')\n raise AttributeError(msg)\n else:\n obj.setncattr(key, value)\n\n\nclass NetCDF4DataStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via the Python-NetCDF4 library.\n\n This store supports NetCDF3, NetCDF4 and OpenDAP datasets.\n \"\"\"\n\n def __init__(self, manager, lock=NETCDF4_PYTHON_LOCK, autoclose=False):\n import netCDF4\n\n if isinstance(manager, netCDF4.Dataset):\n _disable_auto_decode_group(manager)\n manager = DummyFileManager(GroupWrapper(manager))\n\n self._manager = manager\n self.format = self.ds.data_model\n self._filename = self.ds.filepath()\n self.is_remote = is_remote_uri(self._filename)\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose\n\n @classmethod\n def open(cls, filename, mode='r', format='NETCDF4', group=None,\n clobber=True, diskless=False, persist=False,\n lock=None, lock_maker=None, autoclose=False):\n import netCDF4\n if (len(filename) == 88 and\n LooseVersion(netCDF4.__version__) < \"1.3.1\"):\n warnings.warn(\n 'A segmentation fault may occur when the '\n 'file path has exactly 88 characters as it does '\n 'in this case. 
The issue is known to occur with '\n 'version 1.2.4 of netCDF4 and can be addressed by '\n 'upgrading netCDF4 to at least version 1.3.1. '\n 'More details can be found here: '\n 'https://github.com/pydata/xarray/issues/1745')\n if format is None:\n format = 'NETCDF4'\n\n if lock is None:\n if mode == 'r':\n if is_remote_uri(filename):\n lock = NETCDFC_LOCK\n else:\n lock = NETCDF4_PYTHON_LOCK\n else:\n if format is None or format.startswith('NETCDF4'):\n base_lock = NETCDF4_PYTHON_LOCK\n else:\n base_lock = NETCDFC_LOCK\n lock = combine_locks([base_lock, get_write_lock(filename)])\n\n manager = CachingFileManager(\n _open_netcdf4_group, filename, lock, mode=mode,\n kwargs=dict(group=group, clobber=clobber, diskless=diskless,\n persist=persist, format=format))\n return cls(manager, lock=lock, autoclose=autoclose)\n\n @property\n def ds(self):\n return self._manager.acquire().value\n\n def open_store_variable(self, name, var):\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(\n NetCDF4ArrayWrapper(name, self))\n attributes = OrderedDict((k, var.getncattr(k))\n for k in var.ncattrs())\n _ensure_fill_value_valid(data, attributes)\n # netCDF4 specific encoding; save _FillValue for later\n encoding = {}\n filters = var.filters()\n if filters is not None:\n encoding.update(filters)\n chunking = var.chunking()\n if chunking is not None:\n if chunking == 'contiguous':\n encoding['contiguous'] = True\n encoding['chunksizes'] = None\n else:\n encoding['contiguous'] = False\n encoding['chunksizes'] = tuple(chunking)\n # TODO: figure out how to round-trip \"endian-ness\" without raising\n # warnings from netCDF4\n # encoding['endian'] = var.endian()\n pop_to(attributes, encoding, 'least_significant_digit')\n # save source so __repr__ can detect if it's local or not\n encoding['source'] = self._filename\n encoding['original_shape'] = var.shape\n encoding['dtype'] = var.dtype\n\n return Variable(dimensions, data, attributes, encoding)\n\n def get_variables(self):\n dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))\n for k, v in\n iteritems(self.ds.variables))\n return dsvars\n\n def get_attrs(self):\n attrs = FrozenOrderedDict((k, self.ds.getncattr(k))\n for k in self.ds.ncattrs())\n return attrs\n\n def get_dimensions(self):\n dims = FrozenOrderedDict((k, len(v))\n for k, v in iteritems(self.ds.dimensions))\n return dims\n\n def get_encoding(self):\n encoding = {}\n encoding['unlimited_dims'] = {\n k for k, v in self.ds.dimensions.items() if v.isunlimited()}\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, size=dim_length)\n\n def set_attribute(self, key, value):\n if self.format != 'NETCDF4':\n value = encode_nc3_attr_value(value)\n _set_nc_attribute(self.ds, key, value)\n\n def encode_variable(self, variable):\n variable = _force_native_endianness(variable)\n if self.format == 'NETCDF4':\n variable = _encode_nc4_variable(variable)\n else:\n variable = encode_nc3_variable(variable)\n return variable\n\n def prepare_variable(self, name, variable, check_encoding=False,\n unlimited_dims=None):\n datatype = _get_datatype(variable, self.format,\n raise_on_invalid_encoding=check_encoding)\n attrs = variable.attrs.copy()\n\n fill_value = attrs.pop('_FillValue', None)\n\n if datatype is str and fill_value is not None:\n raise NotImplementedError(\n 'netCDF4 does not yet support setting a fill value for '\n 'variable-length strings '\n 
'(https://github.com/Unidata/netcdf4-python/issues/730). '\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n 'NC_CHAR type.' % name)\n\n encoding = _extract_nc4_variable_encoding(\n variable, raise_on_invalid=check_encoding,\n unlimited_dims=unlimited_dims)\n if name in self.ds.variables:\n nc4_var = self.ds.variables[name]\n else:\n nc4_var = self.ds.createVariable(\n varname=name,\n datatype=datatype,\n dimensions=variable.dims,\n zlib=encoding.get('zlib', False),\n complevel=encoding.get('complevel', 4),\n shuffle=encoding.get('shuffle', True),\n fletcher32=encoding.get('fletcher32', False),\n contiguous=encoding.get('contiguous', False),\n chunksizes=encoding.get('chunksizes'),\n endian='native',\n least_significant_digit=encoding.get(\n 'least_significant_digit'),\n fill_value=fill_value)\n _disable_auto_decode_variable(nc4_var)\n\n for k, v in iteritems(attrs):\n # set attributes one-by-one since netCDF4<1.0.10 can't handle\n # OrderedDict as the input to setncatts\n _set_nc_attribute(nc4_var, k, v)\n\n target = NetCDF4ArrayWrapper(name, self)\n\n return target, variable.data\n\n def sync(self):\n self.ds.sync()\n\n def close(self, **kwargs):\n self._manager.close(**kwargs)\n" ]
[ [ "numpy.asarray", "numpy.string_", "numpy.dtype" ] ]
WiraDKP/pytorch_gru_speaker_diarization
[ "b2c170e38f23eebd1bb168441b2e9111baef2461" ]
[ "src/model.py" ]
[ "from torch import nn\n\nclass SpeakerDiarization(nn.Module):\n \"\"\"\n N (n_batch), I (n_input), S (n_seq), L (n_layer), H (n_hidden), C (n_output)\n \"\"\"\n def __init__(self, n_input, n_output, n_hidden=128, n_layer=4, dropout=0.2):\n super().__init__()\n dropout = 0 if n_layer == 0 else dropout\n \n self.gru = nn.GRU(n_input, n_hidden, n_layer, dropout=dropout, batch_first=True)\n self.fc = nn.Linear(n_hidden, n_output)\n \n def forward(self, x, h):\n x = x.transpose(1, 2) # (N, I, S) -> (N, S, I)\n x, h = self.gru(x, h) # (N, S, I), (L, N, H) -> (N, S, H), (L, N, H)\n x = self.fc(x) # (N, S, H) -> (N, S, C)\n x = x.transpose(1, 2) # (N, S, C) -> (N, C, S)\n return x, h" ]
[ [ "torch.nn.Linear", "torch.nn.GRU" ] ]
oyuka1112/Stocks
[ "40e9b7c163ec512d5a4c03aca62b7554d1f2d4c3" ]
[ "Xy.py" ]
[ "import pandas as pd\nimport numpy as np\n\n#region Import data\n\ndef get_prices_df(percent_change=False, path='data/stock_prices.csv'):\n prices = pd.read_csv(path).set_index('date')\n prices.index = pd.to_datetime(prices.index)\n if percent_change:\n prices = prices.pct_change()\n return prices\n\ndef get_volumes_df(path='data/stock_volumes.csv'):\n vols = pd.read_csv(path).set_index('date')\n vols.index = pd.to_datetime(vols.index)\n vols = vols.fillna(0)\n return vols\n\ndef get_info_df(path='data/stock_info.csv'):\n info = pd.read_csv(path).set_index('Instrument')\n return info\n\ndef get_listings_df(path='data/listings.csv'):\n listings = pd.read_csv(path).set_index('date')\n listings.index = pd.to_datetime(listings.index)\n return listings\n\ndef get_data_defaults():\n return get_prices_df(), get_volumes_df(), get_info_df(), get_listings_df()\n\n#endregion\n\n#region Market data\n\ndef get_market_Xy(target_id=None, target_ahead_by=0, percent_change=False, path='data/stock_prices.csv'):\n \"\"\"\n Returns the features (X) and target matrices (y) as DataFrames.\n Result can be stored into two variables at once or into an iterable of size 2.\n \n Parameters\n ----------\n target_id : str\n Stock ID for the target matrix y. If not given, no target matrix will be set.\n target_ahead_by : int, default 1 (day)\n How many days ahead of the datetime index the target matrix will be set.\n percent_change : bool, default True\n Whether or not to return matrices containing percent changes (True) or prices (False).\n path : str, default 'data/stock_prices.csv'\n Filepath for the prices csv file.\n \n Returns\n -------\n X: pd.DataFrame\n Features matrix X. Rows are dates; columns are individual stock IDs.\n y: pd.DataFrame\n Target matrix y for stock_ID = target_ID. Rows are dates; column is labeled to match target_ahead_by.\n Returns None if no stock_id is given for parameter target_id.\n \"\"\"\n X = get_prices_df(percent_change=percent_change, path=path)\n y = None\n \n if target_id is not None:\n y = X.loc[:, [target_id]].shift(-target_ahead_by)\n y.rename({target_id: '{} +{} day'.format(target_id, target_ahead_by)}, axis=1, inplace=True)\n\n return X, y\n\ndef get_y(target_id=None, target_ahead_by=0, path='data/stock_prices.csv'):\n y = pd.read_csv(path, usecols=[\"date\", target_id]).set_index(\"date\")\n y.index = pd.to_datetime(y.index)\n \n if target_ahead_by != 0:\n y.rename({target_id: '{} +{} day'.format(target_id, target_ahead_by)}, axis=1, inplace=True)\n \n return y.shift(-target_ahead_by)\n \n\ndef get_delayed_X(stock, period_start=1, period_stop=20, period_step=1, end_date = \"2021-06-20\", days_before = 120, percent_change=False, path='data/stock_prices.csv'):\n market_X, _ = get_market_Xy(percent_change=percent_change, path=path)\n X = pd.DataFrame(market_X.loc[:, stock])\n for day in range(period_start, period_stop, period_step):\n X[str(day)+ \" day delay\"] = X[stock].shift(periods=day)\n\n X = X.drop(stock, axis=1)\n X = X.loc[:end_date].tail(days_before)\n return X\n\n#endregion" ]
[ [ "pandas.to_datetime", "pandas.read_csv", "pandas.DataFrame" ] ]
ttm/kolmogorov-smirnov
[ "2e13d22cb77e1916d639d4583d01f8e435792eee" ]
[ "scripts/cumSumKS.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n# create some randomly ddistributed data:\ndata = np.random.randn(10000)\n\n# sort the data:\ndata_sorted = np.sort(data)\n\n# calculate the proportional values of samples\np = 1. * arange(len(data)) / (len(data) - 1)\n\n# plot the sorted data:\nfig = figure()\nax1 = fig.add_subplot(121)\nax1.plot(p, data_sorted)\nax1.set_xlabel('$p$')\nax1.set_ylabel('$x$')\n\nax2 = fig.add_subplot(122)\nax2.plot(data_sorted, p)\nax2.set_xlabel('$x$')\nax2.set_ylabel('$p$')\n" ]
[ [ "numpy.random.randn", "numpy.sort" ] ]
jthois/Temporal-Auditory-Coding-Features-for-Causal-Speech-Enhancement
[ "c33db76aa168bdfb19d23e36bb72843dd79b0a36" ]
[ "processing.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom utils import *\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import stft\nimport scipy\nfrom config import *\nimport librosa\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nfrom pambox.inner import GammatoneFilterbank\nfrom analysis import audspace, low_pass_filter, high_pass_filter, get_tfs, get_env, upsample\nfrom pambox.inner import hilbert_envelope\n\n\ndef Mc_mask(x, y, method='naive', dyn_range=40):\n \"\"\"\n IRM = ((signal_power) / ((signal_power)+(noise_power))) ^ (β), where β = 1/2 usually\n Safe implementation here.\n :param x: Magnitudes 3-dim array (BATCH, freq, time)\n :param y: Magnitudes 3-dim array (BATCH, freq, time)\n :return: IRM\n \"\"\"\n assert np.amin(x) >= 0.0\n assert np.amin(y) >= 0.0\n beta = 0.5\n x_new = np.where(x < y, y, x)\n mask = np.power((y**2) / (x_new**2+ np.finfo(float).eps), beta)\n mask = np.clip(mask, a_min=0.0, a_max=1.0)\n return mask\n\n\ndef apply_mask(x, y, method='naive', dyn_range=40):\n y = np.clip(y, a_min=0.0, a_max=1.0)\n return x * y\n\ndef inv_magphase(mag, phase_angle):\n phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)\n return mag * phase\n\ndef specgram(audio, n_fft=512, hop_length=None, window='hann', center=True, mask=False, log_mag=False, re_im=False, dphase=False, mag_only=False):\n \"\"\"Spectrogram using librosa.\n\n Args:\n audio: 1-D array of float32 sound samples.\n n_fft: Size of the FFT.\n hop_length: Stride of FFT. Defaults to n_fft/2.\n mask: Mask the phase derivative by the magnitude.\n log_mag: Use the logamplitude.\n re_im: Output Real and Imag. instead of logMag and dPhase.\n dphase: Use derivative of phase instead of phase.\n mag_only: Don't return phase.\n\n Returns:\n specgram: [n_fft/2 + 1, audio.size / hop_length, 2]. The first channel is\n the logamplitude and the second channel is the derivative of phase.\n \"\"\"\n if not hop_length:\n hop_length = int(n_fft / 2.)\n\n fft_config = dict( n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=center, window=window)\n\n spec = librosa.stft(audio, **fft_config)\n\n if re_im:\n re = spec.real[:, :, np.newaxis]\n im = spec.imag[:, :, np.newaxis]\n spec_real = np.concatenate((re, im), axis=2)\n else:\n mag, phase = librosa.core.magphase(spec)\n phase_angle = np.angle(phase)\n\n if dphase:\n # Derivative of phase\n phase_unwrapped = np.unwrap(phase_angle)\n p = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1]\n p = np.concatenate([phase_unwrapped[:, 0:1], p], axis=1) / np.pi\n else:\n # Normal phase\n p = phase_angle / np.pi\n # Mask the phase\n if log_mag and mask:\n p = mag * p\n # Return Mag and Phase\n p = p.astype(np.float32)[:, :, np.newaxis]\n mag = mag.astype(np.float32)[:, :, np.newaxis]\n if mag_only:\n spec_real = mag[:, :, np.newaxis]\n else:\n spec_real = np.concatenate((mag, p), axis=2)\n return spec_real\n\n\ndef ispecgram(spec, n_fft=512, hop_length=None, window='hann', center=True, mask=False, log_mag=False, re_im=False, dphase=False, mag_only=False, normalize=False, num_iters=1000):\n \"\"\"Inverse Spectrogram using librosa.\n\n Args:\n spec: 3-D specgram array [freqs, time, (mag_db, dphase)].\n n_fft: Size of the FFT.\n hop_length: Stride of FFT. Defaults to n_fft/2.\n mask: Reverse the mask of the phase derivative by the magnitude.\n log_mag: Use the logamplitude.\n re_im: Output Real and Imag. 
instead of logMag and dPhase.\n dphase: Use derivative of phase instead of phase.\n mag_only: Specgram contains no phase.\n num_iters: Number of griffin-lim iterations for mag_only.\n\n Returns:\n audio: 1-D array of sound samples. Peak normalized to 1.\n \"\"\"\n if not hop_length:\n hop_length = n_fft // 2\n\n ifft_config = dict(win_length=n_fft, hop_length=hop_length, center=center, window=window)\n\n if mag_only:\n mag = spec[:, :, 0]\n phase_angle = np.pi * np.random.rand(*mag.shape)\n elif re_im:\n #\n spec_real = spec[:, :, 0] + 1.j * spec[:, :, 1]\n else:\n mag, p = spec[:, :, 0], spec[:, :, 1]\n if mask and log_mag:\n p /= (mag + 1e-13 * np.random.randn(*mag.shape))\n if dphase:\n # Roll up phase\n phase_angle = np.cumsum(p * np.pi, axis=1)\n else:\n phase_angle = p * np.pi\n\n phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)\n spec_real = mag * phase\n\n if mag_only:\n audio = griffin_lim(mag, phase_angle, n_fft, hop_length, num_iters=num_iters)\n else:\n audio = librosa.core.istft(spec_real, **ifft_config)\n\n if normalize:\n return np.squeeze(audio) / audio.max()\n else:\n return np.squeeze(audio)\n\n\ndef envelope_spectrogram(x, fs, fmin, fmax, nbands, tau_ms=8, use_hilbert=False, order=4, q=9.26):\n # Cochlear filtering\n fmin = 80 # corresponds to 4.55 filters per band at nsgt erb, 2.5 for 64 filters\n nsgt = NSGT_ERB(fs, len(x), 4.55, cutoff_frequency=6000, plot=False)# 82.1 to 6000 Hz with 128 filters\n xf = np.real(nsgt.forward_full_temp(x))\n xf = xf[13:, :]\n\n if use_hilbert:\n env = hilbert_envelope(xf) # (Bands, Time)\n env = low_pass_filter(env, fs, cutoff=50)\n else:\n env = np.maximum(xf, 0)\n env = low_pass_filter(env, fs, cutoff=50)\n env = np.maximum(env, 1e-9)\n # match energy\n scale_factor = np.sqrt(np.sum(np.square(xf), axis=-1) / np.sum(np.square(env), axis=-1))\n env = np.multiply(env, scale_factor[:, np.newaxis])\n\n # Integration\n tau = int(tau_ms / 1000 * fs) # 8ms\n win_slope = 1\n window = np.exp(-np.linspace(0, win_slope * tau - 1, tau) / tau)\n window = np.transpose(np.repeat(window[:, np.newaxis], nbands, axis=1)) # (frequency, time)\n y = np.transpose([np.sqrt(np.sum(window * env[:, i:i + tau]**2, axis=-1)) for i in range(0, env.shape[1] - tau, tau)])\n return y ** 0.5\n\ndef ienvelope_spectrogram(S, xn, fs, fmin, fmax, nbands, tau_ms=8, use_hilbert=False, order=4, q=9.26):\n # Cochlear filtering\n fmin = 80\n nsgt = NSGT_ERB(fs, len(xn), 4.55, cutoff_frequency=fmax, plot=False) # 82.1 to 6000 Hz with 128 filters\n xf = np.real(nsgt.forward_full_temp(xn))\n xf = xf[13:, :]\n\n # calculate TFS\n tfs, _ = get_tfs(xf, fs)\n fs_ds = int(1 / (tau_ms / 1000))\n\n # upsample S envelope and modulate tfs\n S = np.square(S)\n S_up = upsample(S, fs // fs_ds)\n S_up = np.maximum(S_up, 1e-12)\n S_up = low_pass_filter(S_up, fs, cutoff=50)\n\n # trim original sound\n tfs = tfs[:,:S_up.shape[1]]\n S_up = S_up[:,:tfs.shape[1]]\n y = np.multiply(S_up, tfs)\n\n y = np.sum(y, axis=0)\n\n return y\n\ndef tfs_spectrogram(x, fs, fmin, fmax, nbands, tau_ms=8, use_hilbert=False):\n # Cochlear filtering\n fmin = 125\n nsgt = NSGT_ERB(fs, len(x), 4.55, cutoff_frequency=fmax, plot=False) # 82.1 to 6000 Hz with 128 filters\n xf = np.real(nsgt.forward_full_temp(x))\n xf = xf[13:72, :]\n # TFS-route\n tfs = np.heaviside(xf, 0)\n tfs = low_pass_filter(tfs, fs, cutoff=2000)\n # Lateral Inhibitory Network #\n # derivative along the tonotopic axis.\n tfss = tfs[:-1, :] - tfs[1:, :]\n tfss = np.concatenate((tfss, [0.0*tfs[-1, :]]), axis=0) #\n # half-wave rectification\n 
tfs_rect = np.maximum(tfss, 0)\n tfs_lin = low_pass_filter(tfs_rect, fs, cutoff=2000) # was 10 original\n # Integration\n tau = int(tau_ms / 1000 * fs) # 8ms\n tfs_shape = (tfs_lin.shape[0], tfs_lin.shape[1] // tau - 1)\n tfs_out = np.zeros(tfs_shape)\n for index, i in enumerate(range(0, tfs_lin.shape[1] - tau - 1, tau)): # (freq, time)\n for f in range(tfs_shape[0]):\n # Differential excitation / fine structure adaptation / Phase-lock\n tfs_out[f, index] = np.sum(np.maximum(np.diff(tfs_lin[f, i:i + tau]),0)) / np.sqrt(f+1)\n return np.array(tfs_out)\n\n\ndef preemphasis(y, coef=0.97, zi=None, return_zf=False):\n '''Pre-emphasize an audio signal with a first-order auto-regressive filter:\n y[n] -> y[n] - coef * y[n-1]\n '''\n return scipy.signal.lfilter([1.0, -coef], [1.0], y)\n\ndef deemphasis(y, coef=0.97, zi=None, return_zf=False):\n '''Restore the Pre-emphasize effect of an audio signal with a first-order auto-regressive filter:\n y[n] -> y[n] + coef * y[n-1]\n '''\n return scipy.signal.lfilter([1], [1, -coef], y)\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.squeeze", "numpy.cumsum", "numpy.concatenate", "numpy.random.randn", "numpy.where", "numpy.square", "numpy.clip", "numpy.sin", "numpy.finfo", "numpy.diff", "scipy.signal.lfilter", "numpy.repeat", "numpy.zeros", "numpy.heaviside", "numpy.multiply", "numpy.amin", "numpy.random.rand", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.cos", "numpy.unwrap", "numpy.angle" ] ]
Plutoyxt/BboxToolkit
[ "16539fc574f209b0d5ec527ca11381dff3380b4d" ]
[ "BboxToolkit/datasets/io.py" ]
[ "import os\nimport os.path as osp\nimport pickle\nimport time\nimport numpy as np\n\nfrom ..utils import get_bbox_dim\nfrom .misc import (read_img_info, change_cls_order, get_classes,\n prog_map)\n\n\ndef load_imgs(img_dir, ann_dir=None, classes=None, nproc=10,\n def_bbox_type='poly'):\n assert def_bbox_type in ['hbb', 'obb', 'poly', None]\n assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'\n if ann_dir is not None:\n print('ann_dir is no use in load_imgs function')\n\n print('Starting loading images information')\n start_time = time.time()\n imgpaths = [osp.join(img_dir, imgfile)\n for imgfile in os.listdir(img_dir)]\n infos = prog_map(read_img_info, imgpaths, nproc)\n\n if def_bbox_type is not None:\n for info in infos:\n if info is None:\n continue\n bbox_dim = get_bbox_dim(def_bbox_type)\n bboxes = np.zeros((0, bbox_dim), dtype=np.float32)\n labels = np.zeros((0, ), dtype=np.int64)\n info['ann'] = dict(bboxes=bboxes, labels=labels)\n classes = () if classes is None else classes\n end_time = time.time()\n print(f'Finishing loading images, get {len(infos)} iamges,',\n f'using {end_time-start_time:.3f}s.')\n return infos, classes\n\n\ndef load_pkl(ann_dir, img_dir=None, classes=None, nproc=10):\n assert osp.isfile(ann_dir), f'The {ann_dir} is not an existing pkl file!'\n assert img_dir is None or osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'\n\n print('Starting loading pkl information')\n start_time = time.time()\n data = pickle.load(open(ann_dir, 'rb'))\n old_classes, contents = data['cls'], data['content']\n\n if img_dir is not None:\n imgpaths = [osp.join(img_dir, content['filename'])\n for content in contents]\n infos = prog_map(read_img_info, imgpaths, nproc)\n for info, content in zip(infos, contents):\n content.update(info)\n\n if classes is None:\n classes = old_classes\n else:\n classes = get_classes(classes)\n change_cls_order(contents, old_classes, classes)\n end_time = time.time()\n print(f'Finishing loading pkl, get {len(contents)} iamges,',\n f'using {end_time-start_time:.3f}s.')\n return contents, classes\n\n\ndef save_pkl(save_dir, contents, classes):\n assert save_dir.endswith('.pkl')\n filepath = osp.split(save_dir)[0]\n if not osp.exists(filepath):\n os.makedirs(filepath)\n\n data = dict(cls=classes, content=contents)\n pickle.dump(data, open(save_dir, 'wb'))\n" ]
[ [ "numpy.zeros" ] ]
bgoli/pysces
[ "94e4734824e8f0eb9cfa9489e853afd4c9a37d23" ]
[ "pysces/PyscesJWSParse.py" ]
[ "\"\"\"\nPySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)\n\nCopyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,\n\nBrett G. Olivier ([email protected])\nTriple-J Group for Molecular Cell Physiology\nStellenbosch University, South Africa.\n\nPermission to use, modify, and distribute this software is given under the\nterms of the PySceS (BSD style) license. See LICENSE.txt that came with\nthis distribution for specifics.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.\nBrett G. Olivier\n\"\"\"\nfrom __future__ import division, print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom pysces.version import __version__\n\n__doc__ = \"PySCeS JWS parser module -- uses PLY 1.5 or newer\"\n\ntry:\n input = raw_input # Py2 compatibility\nexcept NameError:\n pass\n\nimport os, copy\nfrom .lib import lex\nfrom .lib import yacc\nfrom getpass import getuser\nfrom time import sleep, strftime\nfrom scipy import MachAr\n\nMyMachArr = MachAr()\n\n\nclass JWSParser:\n \"\"\"JWSParser written by Johann, based on Jannie's lexparse and integrated into PySCeS by brett -- converts PySCeS (.psc) files to JWS Online (jws) files\"\"\"\n\n ReactionIDs = [] # List of reaction names\n Names = [] # List of all reagent, parameter and function names\n LexErrors = [] # List of lexing errors\n\n NetworkDict = {} # Dictionary containing all reaction information\n InitStrings = [] # Initialisation strings\n InitParStrings = [] # Initialisation strings for parameters -- johann new\n InitVarStrings = [] # Initialisation strings for variables -- johann new\n Inits = [] # Initialised entities\n Reagents = [] # All reagents found during parsing of reactions\n VarReagents = [] # Variable reagents that occur in reactions\n FixedReagents = [] # Fixed reagents\n ReacParams = [] # Temporary list of reaction parameters\n InitParams = [] # Initialised parameters\n ParseErrors = []\n\n mach_spec = MyMachArr\n AllRateEqsGiven = 1 # Flag to check that all rate equations have been given\n Debug = 0\n\n ##############\n # Build the lexer\n ##############\n\n # elementary regular expressions used as building blocks\n Int = r'\\d+' # Integer\n Dec = Int + '\\.' + Int # Decimal\n\n # List of token names\n tokens = (\n 'FIXDEC',\n 'IRREV',\n #'REAL', # johann -- now build up real in a p function since we want to make exponent explicit\n 'INT',\n 'DEC', # johann -- added explicitly since we no longer have REAL token\n 'PLUS',\n 'MINUS',\n 'TIMES',\n 'DIVIDE',\n 'POWER',\n 'LPAREN',\n 'RPAREN',\n 'EQUALS',\n 'COMMA',\n 'REACTION_ID',\n 'STOICH_COEF',\n 'NAME',\n 'EXP',\n ) # johann -- new EXP token\n\n # Simple tokens\n t_IRREV = r'>'\n # t_REAL = Real # johann -- no longer have a real token, now a p function\n t_INT = Int\n t_DEC = Dec # new DEC token\n t_PLUS = r'\\+'\n t_MINUS = r'-'\n t_TIMES = r'\\*'\n t_DIVIDE = r'/'\n t_POWER = '\\*\\*'\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n t_EQUALS = r'='\n t_COMMA = r','\n t_ignore = ' \\t\\r' # Ignore spaces and tabs --- and windows return - brett 20040229\n\n def t_comment(self, t):\n r'\\#.+\\n' # Match from # to newline\n t.lineno += 1 # Increment line number\n\n def t_newline(self, t):\n r'\\n+' # Match newline\n t.lineno += len(t.value) # Increment with number of consecutive newlines\n\n def t_EXP(self, t): # johann -- need separate EXP token to replace for Mathematica\n r'\\d+\\.?\\d*[E|e][\\+|\\-]?' 
# define EXP token merely as digits[.]digits[E|e][+|-]\n t.type = 'EXP' # parse final integer separately in 'Real' p-function to remove leading zeros\n t.value = t.value.replace('e', ' 10^')\n t.value = t.value.replace('E', ' 10^')\n return t\n\n def t_FIXDEC(self, t):\n r'FIX:'\n t.type = 'FIXDEC'\n t.value = 'FIX:'\n return t\n\n def t_REACTION_ID(self, t):\n r'[a-zA-Z]\\w*:' # Match any letter followed by zero or more characters\n # in [a-zA-Z0-9_] up to a colon\n t.type = 'REACTION_ID'\n if t.value[0] == 'v' and len(t.value) > 1:\n t.value = t.value[\n 1:\n ] # remove initial 'v' if present to avoid constructions like 'v[vR1]'\n t.value = (\n 'v[' + t.value[:-1] + ']'\n ) # remove the colon and add v[] for JWS -- johann\n if t.value in self.ReactionIDs:\n self.LexErrors.append(('Duplicate ReactionID ', t.lineno, t.value, t.type))\n else:\n self.ReactionIDs.append(t.value)\n return t\n\n def t_STOICH_COEF(self, t):\n r'\\{\\d+\\}|\\{\\d+\\.\\d+\\}'\n t.type = 'STOICH_COEF'\n t.value = t.value[1:-1]\n return t\n\n def t_NAME(self, t):\n r'[a-zA-Z][\\w]*' # Match any letter followed by zero or characters in the set [a-zA-Z0-9_]\n if (t.value + '[t]' not in self.Names) and (\n t.value not in self.FuncNames\n ): # Only add to list if absent in list\n # self.Names.append(t.value)\n self.Names.append(t.value + '[t]') # -- johann\n # print self.Names[-1]\n # hack! - brett\n if (\n t.value not in self.FuncNames\n ): # make class attributes, ignore function names\n # print 't value before', t.value\n gt = t.value + '[t]'\n t.value = gt\n # print 't value after', t.value\n t.type = 'NAME'\n return t\n\n def t_error(self, t):\n self.LexErrors.append(('Lexer error ', t.lineno, t.value, t.type))\n print('Illegal character, Line ' + str(t.lineno) + ' :' + str(t.value[0]))\n t.skip(1)\n\n ##############\n # The parser #\n ##############\n\n FuncNames = (\n 'acos',\n 'asin',\n 'atan',\n 'atan2',\n 'ceil',\n 'cos',\n 'cosh',\n 'exp',\n 'fabs',\n 'floor',\n 'fmod',\n 'frexp',\n 'hypot',\n 'ldexp',\n 'log',\n 'log10',\n 'modf',\n 'pow',\n 'sin',\n 'sinh',\n 'sqrt',\n 'tan',\n 'tanh',\n )\n\n precedence = (\n ('left', 'PLUS', 'MINUS'),\n ('left', 'TIMES', 'DIVIDE'),\n ('left', 'POWER'),\n ('right', 'UMINUS'),\n )\n\n def Show(self, name, tok):\n if self.Debug:\n print(name, tok)\n\n def p_error(self, t):\n self.ParseErrors.append(('Syntax error ', t.lineno, t.value, t.type))\n print('Syntax error, Line ' + str(t.lineno) + ' : ' + str(t.value))\n tok = yacc.token()\n while tok and tok.type != 'REACTION_ID':\n tok = yacc.token()\n return tok\n\n def p_model(self, t):\n '''Model : Statement\n | Model Statement '''\n\n self.Show('Model', t[0])\n\n def p_statement(self, t):\n '''Statement : Fixed\n | ReactionLine\n | Initialise'''\n self.Show('Statement', t[0])\n\n def p_fixed(self, t):\n '''Fixed : FIXDEC FixList'''\n\n self.Show('Fixed:', t[0])\n\n def p_fixedreagents(self, t):\n '''FixList : NAME\n | NAME FixList'''\n if t[1] != None:\n self.FixedReagents.append(t[1][:-3]) # johann -- remove [t] off end\n t[0] = [t[1]]\n try:\n t[0] += t[2]\n except:\n pass\n self.Show('FixList', t[0])\n\n def p_initialise(self, t):\n '''Initialise : NAME EQUALS Expression'''\n t[1] = t[1][:-3] + '[0]' # johann 20050302 -- Mathematica initialisation\n t[0] = t[1] + t[2] + t[3]\n ## del temp\n self.InitStrings.append(t[0].replace('=', ' = '))\n self.Inits.append(t[1])\n self.Show('Initialisation', t[0])\n\n def p_reaction_line(self, t):\n '''ReactionLine : REACTION_ID ReactionEq\n | REACTION_ID ReactionEq Expression'''\n\n # 
global self.AllRateEqsGiven, ReacParams\n ReacID = t[1]\n if ReacID in self.NetworkDict:\n self.ParseErrors.append(('Duplicate Reaction ', t.lineno, ReacID, None))\n self.NetworkDict[ReacID] = {} # Reaction dictionary for ReacID\n self.NetworkDict[ReacID]['Reagents'] = {} # Reagent dictionary within ReacID\n\n # brett: if an index exists sum the coefficients instead of adding a new one\n # this seems to deal with multiple definitions like X + X > Y and 2{X} + Y > Z + X\n for i in t[2][\n 0\n ]: # First tuple member of ReactionEq contains list of (name,stoichcoef)\n if i[0] in self.NetworkDict[ReacID]['Reagents']:\n self.NetworkDict[ReacID]['Reagents'][i[0]] = (\n self.NetworkDict[ReacID]['Reagents'][i[0]] + i[1]\n )\n else:\n self.NetworkDict[ReacID]['Reagents'][i[0]] = i[\n 1\n ] # Key for reagent with stoichcoef value\n killList = []\n # brett: however for the case of X + Y > Y + Z where the sum of the coefficients\n # is zero we can delete the key (Y) out of the reaction list altgether (hopefully!)\n for i in self.NetworkDict[ReacID]['Reagents']:\n if (\n abs(self.NetworkDict[ReacID]['Reagents'][i])\n < self.mach_spec.eps * 100.0\n ):\n killList.append(i)\n # print self.mach_spec.eps*100.0, self.NetworkDict[ReacID]['Reagents']\n # print killList, self.NetworkDict[ReacID]['Reagents']\n # brett: and the easiest way of doing this is putting the zero keys in a list\n # and deleting them out of the dictionary\n if len(killList) != 0:\n for i in killList:\n del self.NetworkDict[ReacID]['Reagents'][i]\n # print killList, self.NetworkDict[ReacID]['Reagents']\n\n self.NetworkDict[ReacID]['Type'] = t[2][\n 1\n ] # Second tuple member of ReactionEq contains type\n try: # Save rate equation and create parameter list\n self.NetworkDict[ReacID]['RateEq'] = t[3]\n self.NetworkDict[ReacID]['Params'] = self.ReacParams\n self.ReacParams = [] # Reset global self.ReacParams list\n except:\n self.NetworkDict[ReacID]['RateEq'] = ''\n self.NetworkDict[ReacID]['Params'] = []\n self.AllRateEqsGiven = 0 # Set global flag to false\n self.Show('ReactionLine', t[0])\n self.Show('t1', t[1])\n self.Show('t2', t[2])\n self.Show('t3', t[3])\n\n def p_reaction_eq(self, t):\n '''ReactionEq : LeftHalfReaction EQUALS RightHalfReaction\n | LeftHalfReaction IRREV RightHalfReaction'''\n\n ReacType = ''\n if t[2] == '=':\n ReacType = 'Rever'\n elif t[2] == '>':\n ReacType = 'Irrev'\n t[0] = (t[1] + t[3], ReacType)\n self.Show('ReactionEq', t[0])\n\n def p_left_half_reaction(self, t):\n ''' LeftHalfReaction : SubstrateTerm\n | SubstrateTerm PLUS LeftHalfReaction'''\n\n # Make a list of substrate terms\n t[0] = [t[1]]\n\n try:\n t[0] += t[3]\n except:\n pass\n # brett\n # print \"lhr \", t[0]\n self.Show('LeftHalfReaction', t[0])\n\n def p_right_half_reaction(self, t):\n ''' RightHalfReaction : ProductTerm\n | ProductTerm PLUS RightHalfReaction'''\n\n # Make a list of product terms\n t[0] = [t[1]]\n try:\n t[0] += t[3]\n except:\n pass\n # brett\n # print \"rhr \", t[0]\n self.Show('RightHalfReaction', t[0])\n\n def p_substrate_term(self, t):\n '''SubstrateTerm : STOICH_COEF NAME\n | NAME'''\n\n # Make tuple of NAME and stoichiometric coefficient\n # (< 0 because substrate)\n try:\n t[0] = (t[2], -float(t[1]))\n if t[2] not in self.Reagents:\n self.Reagents.append(t[2])\n except:\n t[0] = (t[1], -1.0)\n if t[1] not in self.Reagents:\n self.Reagents.append(t[1])\n self.Show('SubstrateTerm', t[0])\n\n def p_product_term(self, t):\n '''ProductTerm : STOICH_COEF NAME\n | NAME'''\n\n # Make tuple of NAME and stoichiometric 
coefficient\n # (> 0 because product)\n try:\n t[0] = (t[2], float(t[1]))\n if t[2] not in self.Reagents:\n self.Reagents.append(t[2])\n except:\n t[0] = (t[1], 1.0)\n if t[1] not in self.Reagents:\n self.Reagents.append(t[1])\n self.Show('ProductTerm', t[0])\n\n def p_rate_eq(self, t):\n '''Expression : Expression PLUS Expression\n | Expression MINUS Expression\n | Expression TIMES Expression\n | Expression DIVIDE Expression\n | Power\n | Number\n | Func'''\n # |UMINUS : add if the\n # alternative for p_uminus is used\n\n if len(t.slice) == 4:\n t[0] = t[1] + t[2] + t[3]\n else:\n t[0] = t[1]\n\n def p_power(self, t):\n '''Power : Expression POWER Expression'''\n\n t[0] = (\n 'Power[' + t[1] + ',' + t[3] + ']'\n ) # changed to Mathematica notation -- johann\n\n def p_uminus(self, t):\n '''Expression : MINUS Expression %prec UMINUS'''\n # Alternative '''UMINUS : MINUS Expression'''\n\n t[0] = t[1] + t[2]\n\n def p_number(self, t):\n '''Number : Real\n | INT\n | DEC\n | NAME'''\n\n # Build list of entities\n try:\n float(t[1]) # check for a number\n except:\n if (\n (t[1] not in self.FuncNames)\n and (t[1] not in self.ReacParams)\n and (' 10^' not in t[1])\n ):\n # ignore function names, duplications and exponentials\n self.ReacParams.append(t[1])\n # self.ReacParams.append('self.' + t[1])\n t[0] = t[1]\n\n def p_real(self, t):\n '''Real : EXP INT'''\n loop = 1\n while loop == 1: # remove leading zeros from exponent\n if t[2][0] == '0' and len(t[2]) > 1:\n t[2] = t[2][1:]\n else:\n loop = 0\n t[0] = t[1] + t[2]\n\n def p_function(self, t):\n '''Func : LPAREN ArgList RPAREN\n | NAME LPAREN ArgList RPAREN'''\n\n try:\n t[0] = t[1] + t[2] + t[3] + t[4]\n except:\n t[0] = t[1] + t[2] + t[3]\n\n def p_arglist(self, t):\n '''ArgList : Expression\n | Expression COMMA Expression'''\n\n t[0] = t[1]\n try:\n t[0] += t[2] + t[3]\n except:\n pass\n\n ############################################\n # end of lexer and parser definitions\n ############################################\n\n def psc2jws(self, File, indir=None, outdir=None, quiet=1, debug=0):\n \"\"\"\n psc2jws(File,indir=None,outdir=None,quiet=1,debug=0)\n Convert a PySCeS (.psc) file to a JWS Online (.jws) file. 
Call with the input file name, note the input (indir) and output (outdir) can optionally be specified.\n\n Arguments:\n =========\n File: PSC input file\n indir [default=None]: directory of PSC file\n outdir [default=None]: output directory for JWS file\n quiet [default=1]: turn lex/parse noise on/off\n debug [default=0]: optionally output debug information\n\n \"\"\"\n if indir == None:\n indir = os.getcwd()\n if outdir == None:\n outdir = os.getcwd()\n if os.path.exists(os.path.join(indir, File)) and File[-4:] == '.psc':\n go = 1\n else:\n print('\\nIgnoring non-PySCeS model file: ' + os.path.join(indir, File))\n go = 0\n\n if go == 1:\n # clean up the modules\n reload(lex) # brett's bugbear code these have to be here ALWAYS!!\n reload(yacc)\n # clean up the instance\n self.ReactionIDs = [] # List of reaction names\n self.Names = [] # List of all reagent, parameter and function names\n self.LexErrors = [] # List of lexing errors\n self.NetworkDict = {} # Dictionary containing all reaction information\n self.InitStrings = [] # Initialisation strings\n\n self.Inits = [] # Initialised entities\n self.Reagents = [] # All reagents found during parsing of reactions\n self.FixedReagents = [] # Fixed reagents\n self.ReacParams = [] # Temporary list of reaction parameters\n self.ParseErrors = []\n\n self.InitParStrings = (\n []\n ) # Initialisation strings for parameters -- johann new\n self.InitVarStrings = (\n []\n ) # Initialisation strings for variables -- johann new\n self.VarReagents = [] # Variable reagents that occur in reactions\n self.InitParams = [] # Initialised parameters\n\n print('\\nParsing file: ' + os.path.join(indir, File))\n\n Data = open(os.path.join(indir, File), 'r')\n Model = Data.read()\n Data.close()\n\n self.Debug = debug\n self.AllRateEqsGiven = (\n 1 # Flag to check that all rate equations have been given\n )\n\n # try and find a temporary workspace or use cwd\n if 'TMP' in os.environ:\n tempDir = os.environ['TMP']\n elif 'TEMP' in os.environ:\n tempDir = os.environ['TEMP']\n else:\n tempDir = os.getcwd()\n\n os.chdir(tempDir)\n\n # fix filenames for intermediary files - brett\n if not File[:-4].isalnum():\n FileL = list(File)\n FileT = ''\n for let in FileL:\n if let.isalnum():\n FileT += let\n\n # instantiate the lexer and parser\n self.debugfile = '_jws' + FileT[:-3] + \".dbg\"\n self.tabmodule = '_jws' + FileT[:-3] + \"_\" + \"parsetab\"\n else:\n self.debugfile = '_jws' + File[:-4] + \".dbg\"\n self.tabmodule = '_jws' + File[:-4] + \"_\" + \"parsetab\"\n\n if self.Debug:\n print(self.tabmodule)\n print(self.debugfile)\n\n lex.lex(module=self, debug=self.Debug)\n lex.input(Model)\n yacc.yacc(\n module=self,\n debug=self.Debug,\n debugfile=self.debugfile,\n tabmodule=self.tabmodule,\n )\n\n os.chdir(outdir)\n\n while 1:\n tok = lex.token()\n if not tok:\n break\n if self.LexErrors != []:\n print('self.LexErrors = ', self.LexErrors, '\\n')\n\n while 1:\n p = yacc.parse(Model)\n if not p:\n break\n\n # we have the dictionary get rid of this stuff\n del Model, p\n\n # Create list of variable reagents and remove '[t]' from fixed reagents\n for i in range(\n len(self.Reagents)\n ): # johann -- new construction otherwise list elements not replaced\n if self.Reagents[i][:-3] not in self.FixedReagents:\n self.VarReagents.append(self.Reagents[i])\n if self.Reagents[i][:-3] in self.FixedReagents:\n self.Reagents[i] = self.Reagents[i][:-3]\n\n # Create list of initialised parameters\n for i in range(len(self.Inits)): # johann -- reworked extensively\n if self.Inits[i][:-3] + 
'[t]' not in self.VarReagents:\n self.InitStrings[i] = self.InitStrings[i].replace('[0]', '')\n self.InitStrings[i] = self.InitStrings[i].replace(\n '[t]', ''\n ) # capture params initialised i.t.o. other params\n self.Inits[i] = self.Inits[i][:-3]\n self.InitParams.append(self.Inits[i])\n self.InitParStrings.append(self.InitStrings[i])\n elif self.Inits[i][:-3] + '[t]' in self.VarReagents:\n self.InitVarStrings.append(self.InitStrings[i])\n\n # In self.NetworkDict, clean rate equation parameter list of variables that occur in that reaction\n # Add FixedReagent to Params even if not a parameter in rate eqn (requirement to add '$' below)\n for id in list(self.NetworkDict.keys()):\n for reag in self.VarReagents:\n if reag in self.NetworkDict[id]['Params']:\n self.NetworkDict[id]['Params'].remove(reag)\n for reag in self.FixedReagents:\n if (\n reag + '[t]' in list(self.NetworkDict[id]['Reagents'].keys())\n ) and (reag not in self.NetworkDict[id]['Params']):\n self.NetworkDict[id]['Params'].append(reag + '[t]')\n\n # Warn if no reagents have been fixed\n if self.FixedReagents == []:\n print('Warning: No reagents have been fixed')\n else: # Warn if a fixed reagent does not occur in a reaction equation\n for reag in self.FixedReagents:\n if reag not in self.Reagents:\n print(\n 'Warning: '\n + reag\n + ' (fixed) does not occur in any reaction'\n )\n\n # Check whether all parameters have been initialised\n # johann -- remove [t] from params\n for id in list(self.NetworkDict.keys()):\n for i in range(len(self.NetworkDict[id]['Params'])):\n self.NetworkDict[id]['Params'][i] = self.NetworkDict[id]['Params'][\n i\n ][:-3]\n if self.NetworkDict[id]['Params'][i] not in self.InitParams:\n print(\n 'Warning: Parameter '\n + self.NetworkDict[id]['Params'][i]\n + ' has not been initialised'\n )\n\n # Check whether all variable reagents have been initialised\n for reag in self.VarReagents:\n if reag[:-3] + '[0]' not in self.Inits:\n print('Warning: Variable ' + reag + ' has not been initialised')\n\n # Check that all initialised parameters actually occur in self.Inits\n known = 0\n for param in self.InitParams:\n for id in list(self.NetworkDict.keys()):\n if param in self.NetworkDict[id]['Params']:\n known = 1\n break\n else:\n known = 0\n if not known:\n print(\n 'Warning: '\n + param\n + ' has been initialised but does not occur in any rate equation'\n )\n\n # clean up rate equations in self.NetworkDict to remove [t] for Params\n # clean up Reagents to remove [t] and add $ for fixed\n for id in list(self.NetworkDict.keys()):\n for param in self.NetworkDict[id]['Params']:\n self.NetworkDict[id]['RateEq'] = self.NetworkDict[id][\n 'RateEq'\n ].replace(param + '[t]', param)\n for reag in list(self.NetworkDict[id]['Reagents'].keys()):\n if reag[:-3] in self.NetworkDict[id]['Params']:\n saveval = self.NetworkDict[id]['Reagents'].pop(reag)\n self.NetworkDict[id]['Reagents']['$' + reag[:-3]] = saveval\n else:\n saveval = self.NetworkDict[id]['Reagents'].pop(reag)\n self.NetworkDict[id]['Reagents'][reag[:-3]] = saveval\n\n # output errors\n if self.ParseErrors != []:\n print('Parse errors occurred: ', self.ParseErrors)\n\n # debugging\n if debug:\n print('\\n\\n\\n')\n print('\\nself.ReactionIDs: ', self.ReactionIDs)\n print('\\nself.NetworkDict: ', self.NetworkDict)\n print('\\nself.Names: ', self.Names)\n print('\\nself.Inits: ', self.Inits)\n print('\\nself.InitStrings: ', self.InitStrings)\n print('\\nself.InitParStrings: ', self.InitParStrings)\n print('\\nself.InitVarStrings: ', self.InitVarStrings)\n 
print('\\nself.InitParams: ', self.InitParams)\n print('\\nself.Reagents: ', self.Reagents)\n print('\\nself.FixedReagents: ', self.FixedReagents)\n print('\\nself.VarReagents: ', self.VarReagents)\n print('\\nParseErrors: ', self.ParseErrors)\n\n # now write the jws output file\n filename = File[:-4]\n filename = self.chkjws(filename)\n go = 0\n loop = 0\n filex = ''\n while loop == 0:\n try:\n filex = os.path.join(outdir, filename)\n f = open(filex, 'r')\n f.close()\n inp = input('\\nFile \"' + filex + '\" exists.\\nOverwrite? ([y]/n) ')\n if inp == 'y' or inp == '':\n go = 1\n loop = 1\n elif inp == 'n':\n filename = input(\n '\\nFile \"' + filename + '\" exists. Enter a new filename: '\n )\n go = 1\n filex = os.path.join(outdir, filename)\n filename = self.chkjws(filename)\n else:\n print('\\nInvalid input')\n except:\n print('\\nFile \"' + filex + '\" does not exist, proceeding...')\n loop = 1\n go = 1\n if go == 1:\n try:\n UseR = getuser()\n except:\n UseR = ''\n\n outFile = open(filex, 'w')\n header = ''\n # header += '############################################################\\n'\n header += '# JWS model input file \\n'\n header += (\n '# Generated by PySCeS ('\n + __version__\n + ') (http://pysces.sourceforge.net) \\n'\n )\n header += '# Pysces input file: ' + File + '\\n'\n header += (\n '# This file generated: '\n + strftime(\"%a, %d %b %Y %H:%M:%S\")\n + ' by '\n + UseR\n + ' \\n'\n )\n header += (\n '###########################################################\\n\\n'\n )\n outFile.write(header)\n\n # modelname\n modelname = File[:-4]\n outFile.write('begin name\\n' + modelname + '\\nend name\\n\\n')\n\n # reactions and rate equations\n reaction_list = []\n rateeq_list = []\n\n nd = self.NetworkDict\n reaclist = copy.copy(\n list(nd.keys())\n ) # johann -- to sort self.ReactionIDs neatly ;-)\n reaclist.sort()\n for key in reaclist: # key = reaction name\n reagL = []\n reagR = []\n Req = copy.copy(nd[key]['RateEq'])\n for reagent in nd[key]['Reagents']:\n if nd[key]['Reagents'][reagent] > 0:\n reagR.append(\n '{'\n + str(abs(nd[key]['Reagents'][reagent]))\n + '}'\n + reagent\n )\n elif nd[key]['Reagents'][reagent] < 0:\n reagL.append(\n '{'\n + str(abs(nd[key]['Reagents'][reagent]))\n + '}'\n + reagent\n )\n substring = ''\n count = 0\n for x in reagL:\n if count != 0:\n substring += ' + '\n substring += x.replace(' ', '')\n count += 1\n prodstring = ''\n count = 0\n for x in reagR:\n if count != 0:\n prodstring += ' + '\n prodstring += x.replace(' ', '')\n count += 1\n symbol = ' = '\n reaction_list.append(key + '\\t' + substring + symbol + prodstring)\n rateeq_list.append(key + ' = ' + Req)\n outFile.write('begin reactions\\n')\n for x in reaction_list:\n outFile.write(x + '\\n')\n outFile.write('end reactions\\n\\n')\n outFile.write('begin rate equations\\n')\n for x in rateeq_list:\n outFile.write(x + '\\n')\n outFile.write('end rate equations\\n\\n')\n\n # parameters\n outFile.write('begin parameters\\n')\n for x in self.InitParStrings:\n outFile.write(x + '\\n')\n outFile.write('end parameters\\n\\n')\n\n # species initial values\n outFile.write('begin initial conditions\\n')\n for x in self.InitVarStrings:\n outFile.write(x + '\\n')\n outFile.write('end initial conditions\\n\\n')\n\n # close output file\n outFile.close()\n\n # print to stdout if quiet is set to zero\n if quiet == 0:\n print('\\nModel name: ' + modelname)\n\n print(\"\\nReactions:\")\n for x in reaction_list:\n print(x)\n\n print(\"\\nRate Equations:\")\n for x in rateeq_list:\n print(x)\n\n 
print('\\nParameters:')\n for x in self.InitParStrings:\n print(x)\n\n print('\\nSpecies Initial Values:')\n for x in self.InitVarStrings:\n print(x)\n\n def chkjws(self, File):\n \"\"\"\n chkjws(File)\n\n Checks if a filename has a .jws extension and adds one to the returned filename if needed\n\n Arguments:\n =========\n File: the filename to check\n\n \"\"\"\n try:\n if File[-4:] == '.jws':\n pass\n else:\n print('Assuming extension is .jws')\n File += '.jws'\n except:\n print('Chkjws error')\n return File\n\n\nif __name__ == '__main__':\n import os, sys\n from time import sleep\n\n inDiR = 'c://mypysces//pscmodels'\n outDiR = 'c://mypysces//jws'\n\n jwp = JWSParser()\n for mod in os.listdir(inDiR):\n jwp.psc2jws(mod, indir=inDiR, outdir=outDiR, quiet=1, debug=0)\n\n psp = PySCeSParser(debug=0)\n" ]
[ [ "scipy.MachAr" ] ]
daxixi/Context-Aware-Compilation-of-DNN-Training-Pipelines-across-Edge-and-Cloud
[ "bcdd23ba8424717db67dd0a09f40d0126b7c9851" ]
[ "computation.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nimport copy\r\nimport time\r\nimport pickle\r\nimport lz4.frame\r\n\r\nimport decision.engine as decision_engine\r\n\r\ndef edge_forward(models,inputs,use_Q):\r\n x=inputs.cuda()\r\n for model in models:\r\n x=model(x)\r\n x=x.detach().cpu()\r\n if not use_Q:\r\n return x,0\r\n else:\r\n min_range = max(torch.min(x).item(),-128)\r\n max_range = min(torch.max(x).item(),128)\r\n Q = torch.quantize_per_tensor(x, scale=(max_range-min_range)/(2**8), zero_point=int(min_range), dtype=torch.qint8)\r\n E=torch.dequantize(Q)-x\r\n Q = pickle.dumps(Q)\r\n return Q,E\r\n\r\ndef edge(iters,models,inputs,label,meter,optimizers):\r\n label=label.cuda()\r\n outputs=Variable(inputs)\r\n x=outputs.cuda()\r\n for model in models:\r\n x=model(x)\r\n loss=F.cross_entropy(x,label)\r\n for optim in optimizers:\r\n if optim==\"FREE\":\r\n pass\r\n else:\r\n optim.zero_grad()\r\n loss.backward()\r\n for optim in optimizers:\r\n if optim==\"FREE\":\r\n continue\r\n else:\r\n optim.step()\r\n tt,loss_i,cc=summary_iter(x.detach().cpu(),label.cpu())\r\n meter.update(tt,loss_i,cc)\r\n if iters%100==0:\r\n print('train %03d %e %f'%(iters,meter.losses/meter.cnt,meter.correct/meter.cnt))\r\n\r\ndef cloud(models,inputs,lable,optimizers,point,useQ):\r\n lable=lable.cuda()\r\n outputs=Variable(inputs, requires_grad=True).cuda()\r\n outputs.retain_grad()\r\n x=outputs\r\n for model in models:\r\n x=model(x)\r\n loss=F.cross_entropy(x,lable)\r\n for optim in optimizers[point:]:\r\n if optim==\"FREE\":\r\n pass\r\n else:\r\n optim.zero_grad()\r\n loss.backward()\r\n for optim in optimizers[point:]:\r\n if optim==\"FREE\":\r\n continue\r\n else:\r\n optim.step()\r\n grad=outputs.grad.detach()\r\n return x.detach().cpu(),grad.cpu()\r\n\r\ndef edge_backward(models,gradients,inputs,optimizers,point):\r\n try:\r\n x=torch.autograd.Variable(inputs,requires_grad=True).cuda()\r\n for model in models:\r\n x=model(x)\r\n except:\r\n x=models[0](inputs.cuda())\r\n x=torch.autograd.Variable(x,requires_grad=True).cuda()\r\n for model in models[1:]:\r\n x=model(x)\r\n gradients=gradients.cuda()\r\n for optim in optimizers[:point+1]:\r\n if optim==\"FREE\":\r\n pass\r\n else:\r\n optim.zero_grad()\r\n x.backward(gradients)\r\n for optim in optimizers[:point+1]:\r\n if optim==\"FREE\":\r\n continue\r\n else:\r\n optim.step()\r\n \r\ndef test_edge(models,client,test_dataloader):\r\n t=0\r\n c=0\r\n for i, (data, target) in enumerate(test_dataloader):\r\n x=data.cuda()\r\n for model in models:\r\n x=model(x)\r\n feature=copy.copy(x)\r\n feature=feature.detach().cpu()\r\n client.send_tensor('valid',-2,i,target,feature,False)\r\n #download\r\n _,_,_,result,gradient,_,_,_,_,_,_=client.recieve_tensor()\r\n _,id=torch.max(result,1)\r\n correct=torch.sum(id==target.data)\r\n t+=correct.data.item()\r\n c+=target.shape[0]\r\n print(\"test:{}\".format(t/c)) \r\n\r\ndef summary_iter(result,label):\r\n _,id=torch.max(result,1)\r\n correct=torch.sum(id==label.data)\r\n loss=F.cross_entropy(result.cuda(),label.cuda())\r\n return correct.data.item(),loss.data.item(),label.shape[0]\r\n\r\ndef edge_backprocess(head,epoch,iters,result,gradient,Q_history,meter,models,optims,point,report_freq=100):\r\n item='None'\r\n topic='None'\r\n #print(head)\r\n if head=='Train':\r\n s=time.time()\r\n bepoch,bi,inputs,label,E=Q_history.get()\r\n #error feedback\r\n if not isinstance(E,int):\r\n heisen=gradient*gradient\r\n 
gradient=gradient-E*heisen\r\n #print(epoch==bepoch and bi==iters)\r\n edge_backward(models,gradient,inputs,optims,point)\r\n tt,loss_i,cc=summary_iter(result,label)\r\n meter.update(tt,loss_i,cc)\r\n if iters%report_freq==0:\r\n print('train %03d %e %f'%(iters,meter.losses/meter.cnt,meter.correct/meter.cnt))\r\n #print(bi,\"backward\",time.time()-s)\r\n #print(bi,'end',time.time())\r\n elif head=='Valid':\r\n inputs,label=Q_history.get()\r\n tt,loss_i,cc=summary_iter(result,label)\r\n meter.update(tt,loss_i,cc)\r\n if iters%report_freq==0:\r\n print('valid %03d %e %f'%(iters,meter.losses/meter.cnt,meter.correct/meter.cnt))\r\n elif head=='EndTrain':\r\n loss=meter.losses/meter.cnt\r\n acc=meter.correct/meter.cnt\r\n meter.reset()\r\n torch.cuda.synchronize()\r\n end=time.time()\r\n item='EndTrain'\r\n topic=(loss,acc,end)\r\n elif head=='EndValid':\r\n acc=meter.correct/meter.cnt\r\n meter.reset()\r\n item='EndValid'\r\n topic=acc\r\n return item,topic \r\n\r\ndef dynamic_decision(upload,download,models,global_models,remain_epoch,edge,cloud,feature_size,model_size,K,point,qtime):\r\n #test bandwdith\r\n upload=abs(upload)\r\n download=abs(download)\r\n #print(\"upload speed {} and download speed {}\".format(upload,download))\r\n #change partition\r\n estimate_latency,new_point, use_Q=decision_engine.decide_point(edge,cloud,feature_size,upload,download,model_size,point,K,remain_epoch,qtime)\r\n\r\n return estimate_latency,new_point,use_Q\r\n\r\ndef dynamic_change(client,models,global_models,point,new_point):\r\n for model in models:\r\n model=model.cpu()\r\n global_models[:point+1]=models\r\n if point<new_point:\r\n #download point+1,...,new_point from cloud\r\n client.recieve_and_update_model(global_models)\r\n else:\r\n #upload new_point+1,...,point to cloud\r\n index=(new_point+1,point)\r\n client.send_model(index,global_models[new_point+1:point+1])\r\n models=global_models[:new_point+1]\r\n for model in models:\r\n model=model.cuda()\r\n model.train()\r\n return models\r\n \r\ndef cloud_dynamic_change_model(global_models,models,point,epoch,iters,server):\r\n global_models[point:]=models\r\n for model in models:\r\n model=model.cpu()\r\n if epoch<iters:\r\n #send param to edge\r\n index=(epoch+1,iters)\r\n server.send_model(index,global_models[epoch+1:iters+1])\r\n else:\r\n #recv param from edge\r\n server.recieve_and_update_model(global_models)\r\n point=iters+1\r\n models=global_models[point:]\r\n for model in models:\r\n model=model.cuda()\r\n model.train()\r\n return point, models \r\n" ]
[ [ "torch.cuda.synchronize", "torch.max", "torch.optim.zero_grad", "torch.min", "torch.nn.functional.cross_entropy", "torch.sum", "torch.dequantize", "torch.optim.step", "torch.autograd.Variable" ] ]
zhizhangxian/DeepLab-v3-plus-cityscapes
[ "c61f5a80ee35b92abf37e73176ecc20068933fe2" ]
[ "demo.py" ]
[ "# from cityscapes import CityScapes, collate_fn2\n\n# from tqdm import tqdm\n# from torch.utils.data import DataLoader\n# from configs.configurations import Config\n\n\n\n# # def get_overlaps(cur_cors, ori_cors):\n# # overlaps = []\n# # up = max(ori_cors[0][0], ori_cors[1][0])\n# # left = max(ori_cors[0][1], ori_cors[1][1])\n# # down = min(ori_cors[0][2], ori_cors[1][2])\n# # right = min(ori_cors[0][3], ori_cors[1][3])\n# # up_left = (up, left)\n# # down_right = (down, right)\n\n# # for i in range(len(cur_cors)):\n# # ori_cor = ori_cors[i]\n# # cur_cor = cur_cors[i]\n# # size_y, size_x = cur_cor[2] - cur_cor[0], cur_cor[3] - cur_cor[1]\n# # _up_left = (round(cur_cor[0] + size_y * (up_left[0] - ori_cor[0]) / (ori_cor[2] - ori_cor[0])),\n# # round(cur_cor[1] + size_x * (up_left[1] - ori_cor[1]) / (ori_cor[3] - ori_cor[1])))\n# # _down_right = (round(cur_cor[0] + size_y * (down_right[0] - ori_cor[0]) / (ori_cor[2] - ori_cor[0])),\n# # round(cur_cor[1] + size_x * (down_right[1] - ori_cor[1]) / (ori_cor[3] - ori_cor[1])))\n# # overlaps.append([_up_left, _down_right])\n\n# # return overlaps\n\n\n\n# def get_hw(overlap):\n# up, left = overlap[0]\n# down, right = overlap[1]\n# h = down - up\n# w = right - left\n\n# return h, w\n\n# if __name__ == '__main__':\n# cfg = Config()\n# # cfg.datapth = r'D:\\datasets\\cityscapes'\n# ds = CityScapes(cfg, mode='train', num_copys=2)\n# # for i in range(100):\n# # sample = ds[0]\n# # overlap1, overlap2 = sample['overlap']\n\n\n# # h1, w1 = get_hw(overlap1)\n# # h2, w2 = get_hw(overlap2)\n# # print((h1 == h2) and (w1 == w2))\n# # print(h1, w1)\n\n# dl = DataLoader(ds,\n# batch_size = 4,\n# shuffle = True,\n# num_workers = 4,\n# collate_fn=collate_fn2,\n# drop_last = True)\n# for im_lb in dl:\n# im = im_lb[0]\n# lb = im_lb[1]\n# print(im.shape)\n# print(lb.shape)\n# break\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\n\nfrom cityscapes import CityScapes, collate_fn2\nfrom loss import OhemCELoss, pgc_loss\n\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\n# from models.deeplabv3plus import Deeplab_v3plus\nfrom configs.configurations import Config\n\nimport cv2\n\n\ndef Rec(img, box, point_color = (0, 255, 0), thickness = 1, lineType = 4, show=False, crop=False):\n\n if show:\n ptLeftTop = (box[0][1], box[0][0])\n ptRightBottom = (box[1][1], box[1][0])\n\n img = cv2.rectangle(img, ptLeftTop, ptRightBottom, point_color, thickness, lineType)\n cv2.imshow('AlanWang', img)\n cv2.waitKey(0) # 显示 10000 ms 即 10s 后消失 \n\n\n if crop:\n return img[box[0][0]:box[1][0], box[0][1]:box[1][1]]\n return img\n\n\n\n\n# def Rec(img, box, point_color = (0, 255, 0), thickness = 1, lineType = 4):\n\n# ptLeftTop, ptRightBottom = box[0], box[1]\n \n# img = cv2.rectangle(img, ptLeftTop, ptRightBottom, point_color, thickness, lineType)\n \n# cv2.imshow('AlanWang', img)\n# cv2.waitKey(0) # 显示 10000 ms 即 10s 后消失 \n# return img\n\n\nif __name__ == \"__main__\":\n cfg = Config()\n cfg.datapth = r'D:\\datasets\\cityscapes'\n cfg.crop_size = (384, 384)\n ds = CityScapes(cfg, mode='train', num_copys=2)\n\n\n # new_imlbs = ds[0]\n # overlaps = new_imlbs['overlap']\n # flips = new_imlbs['flip']\n # flip = flips[0] * flips[1]\n # _ims = new_imlbs['im']\n # im1, im2 = np.array(_ims[0]), np.array(_ims[1])\n # img = np.hstack([im1, im2])\n # cv2.imwrite('ori_stack.jpg', img)\n # im1 = Rec(im1, overlaps[0], crop=True)\n # im2 = Rec(im2, overlaps[1], crop=True)\n # if flip == -1:\n # im2 = cv2.flip(im2, 1)\n # if (im1 == im2).all():\n # print('exit')\n # exit()\n # 
else:\n # print('noop')\n # img = np.hstack([im1, im2])\n # cv2.imwrite('stack.jpg', img)\n\n # exit()\n\n\n\n\n\n\n\n # sample = ds[0]\n # ims, lbs, overlap = sample['im'], sample['lb'], sample['overlap']#, sample['flip'],\n # try:\n # flip = sample['flip']\n # except:\n # flip = 1\n # # print(overlap)\n # im1, im2 = ims\n\n # im1 = np.array(im1)\n # im2 = np.array(im2)\n\n # print(flip)\n\n\n\n # # box1 = [(overlap[0][0][0], overlap[0][0][1]), (overlap[0][1][0], overlap[0][1][1])]\n # box1 = overlap[0]\n # print(box1)\n # # box2 = [(overlap[1][0][0], overlap[1][0][1]), (overlap[1][1][0], overlap[1][1][1])]\n # box2 = overlap[1]\n # print(box2)\n\n\n # im1 = Rec(im1, box1, show=False, crop=True)\n # im2 = Rec(im2, box2, show=False, crop=True)\n # if flip[0] * flip[1] == -1:\n # im2 = cv2.flip(im2, 1)\n # if (im1 == im2).all():\n # exit()\n # img = np.hstack([im1, im2])\n # cv2.imshow('img', img)\n # cv2.waitKey(0)\n # exit()\n\n \n\n\n\n dl = DataLoader(ds,\n batch_size = 2,\n shuffle = False,\n num_workers = 2,\n collate_fn=collate_fn2,\n drop_last = True)\n for im_lb in dl:\n break\n\n\n diter = iter(dl)\n im, lb, overlap, flip = next(diter)\n print(flip[0])\n\n def crop(lb, overlap):\n return lb[overlap[0][0]:overlap[1][0], overlap[0][1]:overlap[1][1]]\n\n lb = lb.squeeze(1)\n lb1 = crop(lb[0], overlap[0][0])\n lb2 = crop(lb[1], overlap[0][1])\n if flip[0] == -1:\n lb2 = torch.flip(lb2, [1])\n\n print(lb1.shape)\n print(lb2.shape)\n \n print(lb.shape)\n print(overlap)\n\n print((lb1 == lb2).all())\n\n\n # # box1 = [(overlap[0][0][0], overlap[0][0][1]), (overlap[0][1][0], overlap[0][1][1])]\n # box1 = overlap[0]\n # print(box1)\n # # box2 = [(overlap[1][0][0], overlap[1][0][1]), (overlap[1][1][0], overlap[1][1][1])]\n # box2 = overlap[1]\n # print(box2)\n\n # net = Deeplab_v3plus(cfg)\n # net.cuda()\n # net.train()\n # net = nn.DataParallel(net)\n # diter = iter(dl)\n # im, lb, overlap, flip = next(diter)\n\n\n \n\n\n\n # lb = lb.cuda()\n # lb = lb.squeeze(1)\n # im1, im2 = im[::2], im[1::2]\n # logits1 = net(im1)\n # logits2 = net(im2)\n # outputs = []\n # for f1, f2 in zip(logits1, logits2):\n # outputs.append([f1, f2])\n # n_min = cfg.ims_per_gpu*cfg.crop_size[0]*cfg.crop_size[1]//16\n # criteria = OhemCELoss(thresh=cfg.ohem_thresh, n_min=n_min).cuda()\n # Criterion = pgc_loss(use_pgc = [0,1,2], criteria=criteria)\n # mse, sym_ce, mid_mse, mid_ce, mid_l1, ce = Criterion(outputs, overlap, flip, lb)\n # loss = cfg.beta * sym_ce + ce\n # gc_loss = sum(mid_mse)\n # loss += cfg.alpha * gc_loss\n # loss.backward()\n # print(loss)\n\n # for i in range(100):\n # # with torch.no_grad():\n # in_ten = torch.randn((1, 3, 768, 768)).cuda()\n # logits = net(in_ten)\n \n # for logit in logits:\n # print(logit.size())\n # break\n" ]
[ [ "torch.flip", "torch.utils.data.DataLoader" ] ]
Innovativaltd/RiskAdjustedReturn
[ "a2eacbf8ea7d24f81c9eb2d5ab49aba6d028166b" ]
[ "notebooks/risk_adjusted_return.py" ]
[ "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\n# standard imports\nimport warnings\nimport math\nimport cmath\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport yfinance as yf\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom sklearn.linear_model import LinearRegression\nimport os.path\nimport datetime\nfrom datetime import date\nimport random\nfrom nsetools import Nse\nfrom dataclasses import dataclass, field\nfrom typing import Dict\nimport traceback\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# creating a Nse object\nnse = Nse()\n\npd.set_option('display.max_rows', None)\n\n# Find the most recent working day, to avoid trying to download all stocks data from weekends\nend_dt = dt.datetime.today()\nday_of_the_week = end_dt.weekday() # starts from 0 for Monday end ends on 6 for Sunday\nif ( day_of_the_week == 5): # Saturday & Sunday\n end_dt = end_dt - dt.timedelta(days=1)\nelif ( day_of_the_week == 6): # Saturday & Sunday\n end_dt = end_dt - dt.timedelta(days=2)\n\nDD = dt.timedelta(days=365*20)\nstart_dt = end_dt - DD\nend = end_dt.strftime(\"%Y-%m-%d\")\nstart = start_dt.strftime(\"%Y-%m-%d\")\nprint(\"Date as string - start:{} end:{}\".format(start, end))\nannual_risk_free_interest_rate = (5.1/100)\n# compound interest rate is translated into a monthly rate with this formula: i_monthly = (1 + i_annual) ^ (1/12) – 1\nmonthly_risk_free_interest_rate = (1 + annual_risk_free_interest_rate / 100 )**(1/12) - 1\nequity_risk_premium = (8.0/100) # India Equity Risk Premium (2021) study: https://incwert.com/india-equity-risk-premium-2021/\n#tickers=['^NSEI', 'ACC.NS','TCS.NS', 'INFY.NS', 'WIPRO.NS']\nt=['^NSEI', 'ACC.NS', 'AJANTSOY.BO', 'AMARAJABAT.NS', 'AROGRANITE.NS', 'ASHOKLEY.NS', 'ATULAUTO.NS', 'BAJAJST.BO', 'BHARTIARTL.NS', 'CANBK.NS', 'CYIENT.NS', 'DABUR.NS', 'DCW.NS', 'DECCANCE.NS', 'DEEPAKNTR.NS', 'DELTACORP.NS', 'DIXON.NS', 'DWARKESH.NS', 'EIDPARRY.NS', 'FSL.NS', 'GPIL.NS', 'HAL.NS', 'HAPPSTMNDS.NS', 'HEMIPROP.NS', 'HGS.NS', 'HINDALCO.NS', 'IGPL.NS', 'INDHOTEL.NS', 'INDIACEM.NS', 'INDNIPPON.NS', 'INTENTECH.NS', 'INTLCONV.NS', 'ITC.NS', 'JENBURPH.BO', 'JKTYRE.NS', 'JSLHISAR.NS', 'LGBBROSLTD.NS', 'LINCOLN.NS', 'LT.NS', 'MANALIPETC.NS', 'MCX.NS', 'MIDHANI.NS', 'MINDTREE.NS', 'MPHASIS.NS', 'NMDC.NS', 'NTPC.NS', 'OPTOCIRCUI.NS', 'PNB.NS', 'POWERGRID.NS', 'RELIANCE.NS', 'RPOWER.NS', 'SUZLON.NS', 'TATACHEM.NS', 'TATAMTRDVR.NS', 'TATAPOWER.NS', 'TATASTEEL.NS', 'TATASTLLP.NS', 'TCS.NS', 'TINPLATE.NS', 'TITAN.NS', 'ULTRACEMCO.NS', 'WOCKPHARMA.NS', 'ZEEL.NS']\nj=['^NSEI', 'ACC.NS', 'ANSALHSG.NS', 'DABUR.NS', 'DLF.NS', 'GICHSGFIN.NS', 'GMRINFRA.NS', 'IDBI.NS', 'JPASSOCIAT.NS', 'LT.NS', 'Parsvnath.NS', 'PURVA.NS', 'RELCAPITAL.NS', 'Reliance.NS', 'Suzlon.NS', 'TTML.NS', 'UNITECH.NS']\nnifty500=['^NSEI', '3MINDIA.NS', 'ABB.NS', 'ACC.NS', 'AIAENG.NS', 'APLAPOLLO.NS', 'AUBANK.NS', 'POWERINDIA.NS', 'AARTIDRUGS.NS', 'AARTIIND.NS', 'AAVAS.NS', 'ABB.NS', 'ABBOTINDIA.NS', 'ABCAPITAL.NS', 'ABFRL.NS', 'ACC.NS', 'ADANIENT.NS', 'ADANIGREEN.NS', 'ADANIPORTS.NS', 'ADANITRANS.NS', 'ADVENZYMES.NS', 'AEGISCHEM.NS', 'AFFLE.NS', 'AIAENG.NS', 'AJANTPHARM.NS', 'ALEMBICLTD.NS', 'ALKEM.NS', 'ALKYLAMINE.NS', 'ALOKINDS.NS', 'AMARAJABAT.NS', 'AMBER.NS', 'AMBUJACEM.NS', 'ANGELBRKG.NS', 'ANURAS.NS', 'APLAPOLLO.NS', 'APLLTD.NS', 'APOLLOHOSP.NS', 'APOLLOTYRE.NS', 'ASAHIINDIA.NS', 'ASHOKA.NS', 'ASHOKLEY.NS', 'ASIANPAINT.NS', 
'ASTERDM.NS', 'ASTRAL.NS', 'ASTRAZEN.NS', 'ATGL.NS', 'ATUL.NS', 'AUBANK.NS', 'AUROPHARMA.NS', 'AVANTIFEED.NS', 'AXISBANK.NS', 'BAJAJ-AUTO.NS', 'BAJAJCON.NS', 'BAJAJELEC.NS', 'BAJAJFINSV.NS', 'BAJAJHLDNG.NS', 'BAJFINANCE.NS', 'BALAMINES.NS', 'BALKRISIND.NS', 'BALRAMCHIN.NS', 'BANDHANBNK.NS', 'BANKBARODA.NS', 'BANKINDIA.NS', 'BASF.NS', 'BATAINDIA.NS', 'BAYERCROP.NS', 'BBTC.NS', 'BDL.NS', 'BEL.NS', 'BEML.NS', 'BERGEPAINT.NS', 'BHARATFORG.NS', 'BHARATRAS.NS', 'BHARTIARTL.NS', 'BHEL.NS', 'BIOCON.NS', 'BIRLACORPN.NS', 'BLUEDART.NS', 'BLUESTARCO.NS', 'BOSCHLTD.NS', 'BPCL.NS', 'BRIGADE.NS', 'BRITANNIA.NS', 'BSE.NS', 'BSOFT.NS', 'BURGERKING.NS', 'CADILAHC.NS', 'CAMS.NS', 'CANBK.NS', 'CANFINHOME.NS', 'CAPLIPOINT.NS', 'CARBORUNIV.NS', 'CASTROLIND.NS', 'CCL.NS', 'CDSL.NS', 'CEATLTD.NS', 'CENTRALBK.NS', 'CENTURYPLY.NS', 'CENTURYTEX.NS', 'CERA.NS', 'CESC.NS', 'CGCL.NS', 'CGPOWER.NS', 'CHALET.NS', 'CHAMBLFERT.NS', 'CHOLAFIN.NS', 'CHOLAHLDNG.NS', 'CIPLA.NS', 'COALINDIA.NS', 'COCHINSHIP.NS', 'COFORGE.NS', 'COLPAL.NS', 'CONCOR.NS', 'COROMANDEL.NS', 'CREDITACC.NS', 'CRISIL.NS', 'CROMPTON.NS', 'CSBBANK.NS', 'CUB.NS', 'CUMMINSIND.NS', 'CYIENT.NS', 'DABUR.NS', 'DALBHARAT.NS', 'DBL.NS', 'DCBBANK.NS', 'DCMSHRIRAM.NS', 'DEEPAKNTR.NS', 'DELTACORP.NS', 'DHANI.NS', 'DHANUKA.NS', 'DIVISLAB.NS', 'DIXON.NS', 'DLF.NS', 'DMART.NS', 'DRREDDY.NS', 'ECLERX.NS', 'EDELWEISS.NS', 'EICHERMOT.NS', 'EIDPARRY.NS', 'EIHOTEL.NS', 'ELGIEQUIP.NS', 'EMAMILTD.NS', 'ENDURANCE.NS', 'ENGINERSIN.NS', 'EPL.NS', 'EQUITAS.NS', 'EQUITASBNK.NS', 'ERIS.NS', 'ESCORTS.NS', 'EXIDEIND.NS', 'FACT.NS', 'FDC.NS', 'FEDERALBNK.NS', 'FINCABLES.NS', 'FINEORG.NS', 'FINPIPE.NS', 'FLUOROCHEM.NS', 'FORTIS.NS', 'FRETAIL.NS', 'FSL.NS', 'GAEL.NS', 'GAIL.NS', 'GALAXYSURF.NS', 'GARFIBRES.NS', 'GESHIP.NS', 'GICRE.NS', 'GILLETTE.NS', 'GLAND.NS', 'GLAXO.NS', 'GLENMARK.NS', 'GMMPFAUDLR.NS', 'GNFC.NS', 'GODFRYPHLP.NS', 'GODREJAGRO.NS', 'GODREJCP.NS', 'GODREJIND.NS', 'GODREJPROP.NS', 'GPPL.NS', 'GRANULES.NS', 'GRAPHITE.NS', 'GRASIM.NS', 'GREAVESCOT.NS', 'GRINDWELL.NS', 'GSFC.NS', 'GSPL.NS', 'GUJALKALI.NS', 'GUJGASLTD.NS', 'HAL.NS', 'HAPPSTMNDS.NS', 'HATHWAY.NS', 'HATSUN.NS', 'HAVELLS.NS', 'HCLTECH.NS', 'HDFC.NS', 'HDFCAMC.NS', 'HDFCBANK.NS', 'HDFCLIFE.NS', 'HEG.NS', 'HEIDELBERG.NS', 'HEMIPROP.NS', 'HEROMOTOCO.NS', 'HFCL.NS', 'HGS.NS', 'HIKAL.NS', 'HINDALCO.NS', 'HINDCOPPER.NS', 'HINDPETRO.NS', 'HINDUNILVR.NS', 'HINDZINC.NS', 'HOMEFIRST.NS', 'HONAUT.NS', 'HUDCO.NS', 'IBREALEST.NS', 'IBULHSGFIN.NS', 'ICICIBANK.NS', 'ICICIGI.NS', 'ICICIPRULI.NS', 'ICIL.NS', 'IDBI.NS', 'IDEA.NS', 'IDFC.NS', 'IDFCFIRSTB.NS', 'IEX.NS', 'IFBIND.NS', 'IGL.NS', 'IIFL.NS', 'IIFLWAM.NS', 'INDHOTEL.NS', 'INDIACEM.NS', 'INDIAMART.NS', 'INDIANB.NS', 'INDIGO.NS', 'INDIGOPNTS.NS', 'INDOCO.NS', 'INDUSINDBK.NS', 'INDUSTOWER.NS', 'INFIBEAM.NS', 'INFY.NS', 'INGERRAND.NS', 'INOXLEISUR.NS', 'INTELLECT.NS', 'IOB.NS', 'IOC.NS', 'IOLCP.NS', 'IPCALAB.NS', 'IRB.NS', 'IRCON.NS', 'IRCTC.NS', 'IRFC.NS', 'ISEC.NS', 'ITC.NS', 'ITI.NS', 'JAMNAAUTO.NS', 'JBCHEPHARM.NS', 'JCHAC.NS', 'JINDALSAW.NS', 'JINDALSTEL.NS', 'JKCEMENT.NS', 'JKLAKSHMI.NS', 'JKPAPER.NS', 'JKTYRE.NS', 'JMFINANCIL.NS', 'JSL.NS', 'JSLHISAR.NS', 'JSWENERGY.NS', 'JSWSTEEL.NS', 'JUBLFOOD.NS', 'JUBLINGREA.NS', 'JUBLPHARMA.NS', 'JUSTDIAL.NS', 'JYOTHYLAB.NS', 'KAJARIACER.NS', 'KALPATPOWR.NS', 'KALYANKJIL.NS', 'KANSAINER.NS', 'KARURVYSYA.NS', 'KEC.NS', 'KEI.NS', 'KNRCON.NS', 'KOTAKBANK.NS', 'KPITTECH.NS', 'KPRMILL.NS', 'KRBL.NS', 'KSB.NS', 'KSCL.NS', 'L&TFH.NS', 'LALPATHLAB.NS', 'LAOPALA.NS', 'LAURUSLABS.NS', 'LAXMIMACH.NS', 'LEMONTREE.NS', 'LICHSGFIN.NS', 
'LINDEINDIA.NS', 'LODHA.NS', 'LT.NS', 'LTI.NS', 'LTTS.NS', 'LUPIN.NS', 'LUXIND.NS', 'LXCHEM.NS', 'M&M.NS', 'M&MFIN.NS', 'MAHABANK.NS', 'MAHINDCIE.NS', 'MAHLOG.NS', 'MANAPPURAM.NS', 'MARICO.NS', 'MARUTI.NS', 'MASTEK.NS', 'MAXHEALTH.NS', 'MAZDOCK.NS', 'MCDOWELL-N.NS', 'MCX.NS', 'METROPOLIS.NS', 'MFSL.NS', 'MGL.NS', 'MHRIL.NS', 'MIDHANI.NS', 'MINDACORP.NS', 'MINDAIND.NS', 'MINDTREE.NS', 'MMTC.NS', 'MOIL.NS', 'MOTILALOFS.NS', 'MPHASIS.NS', 'MRF.NS', 'MRPL.NS', 'MUTHOOTFIN.NS', 'NAM-INDIA.NS', 'NATCOPHARM.NS', 'NATIONALUM.NS', 'NAUKRI.NS', 'NAVINFLUOR.NS', 'NAZARA.NS', 'NBCC.NS', 'NCC.NS', 'NESCO.NS', 'NESTLEIND.NS', 'NETWORK18.NS', 'NFL.NS', 'NH.NS', 'NHPC.NS', 'NIACL.NS', 'NILKAMAL.NS', 'NLCINDIA.NS', 'NMDC.NS', 'NOCIL.NS', 'NTPC.NS', 'OBEROIRLTY.NS', 'OFSS.NS', 'OIL.NS', 'ONGC.NS', 'ORIENTELEC.NS', 'PAGEIND.NS', 'PEL.NS', 'PERSISTENT.NS', 'PETRONET.NS', 'PFC.NS', 'PFIZER.NS', 'PGHH.NS', 'PGHL.NS', 'PHILIPCARB.NS', 'PHOENIXLTD.NS', 'PIDILITIND.NS', 'PIIND.NS', 'PNB.NS', 'PNBHOUSING.NS', 'PNCINFRA.NS', 'POLYCAB.NS', 'POLYMED.NS', 'POLYPLEX.NS', 'POONAWALLA.NS', 'POWERGRID.NS', 'POWERINDIA.NS', 'PRAJIND.NS', 'PRESTIGE.NS', 'PRINCEPIPE.NS', 'PRSMJOHNSN.NS', 'PVR.NS', 'QUESS.NS', 'RADICO.NS', 'RAILTEL.NS', 'RAIN.NS', 'RAJESHEXPO.NS', 'RALLIS.NS', 'RAMCOCEM.NS', 'RATNAMANI.NS', 'RBLBANK.NS', 'RCF.NS', 'RECLTD.NS', 'REDINGTON.NS', 'RELAXO.NS', 'RELIANCE.NS', 'RHIM.NS', 'RITES.NS', 'ROSSARI.NS', 'ROUTE.NS', 'RVNL.NS', 'SAIL.NS', 'SANOFI.NS', 'SBICARD.NS', 'SBILIFE.NS', 'SBIN.NS', 'SCHAEFFLER.NS', 'SCHNEIDER.NS', 'SCI.NS', 'SEQUENT.NS', 'SFL.NS', 'SHARDACROP.NS', 'SHILPAMED.NS', 'SHREECEM.NS', 'SHRIRAMCIT.NS', 'SIEMENS.NS', 'SIS.NS', 'SJVN.NS', 'SKFINDIA.NS', 'SOBHA.NS', 'SOLARA.NS', 'SOLARINDS.NS', 'SONACOMS.NS', 'SONATSOFTW.NS', 'SPANDANA.NS', 'SPARC.NS', 'SPICEJET.NS', 'SRF.NS', 'SRTRANSFIN.NS', 'STAR.NS', 'STARCEMENT.NS', 'STLTECH.NS', 'SUDARSCHEM.NS', 'SUMICHEM.NS', 'SUNDARMFIN.NS', 'SUNDRMFAST.NS', 'SUNPHARMA.NS', 'SUNTECK.NS', 'SUNTV.NS', 'SUPPETRO.NS', 'SUPRAJIT.NS', 'SUPREMEIND.NS', 'SUVENPHAR.NS', 'SUZLON.NS', 'SWSOLAR.NS', 'SYMPHONY.NS', 'SYNGENE.NS', 'TANLA.NS', 'TASTYBITE.NS', 'TATACHEM.NS', 'TATACOFFEE.NS', 'TATACOMM.NS', 'TATACONSUM.NS', 'TATAELXSI.NS', 'TATAMOTORS.NS', 'TATAMTRDVR.NS', 'TATAPOWER.NS', 'TATASTEEL.NS', 'TATASTLLP.NS', 'TCIEXP.NS', 'TCNSBRANDS.NS', 'TCS.NS', 'TEAMLEASE.NS', 'TECHM.NS', 'THERMAX.NS', 'THYROCARE.NS', 'TIINDIA.NS', 'TIMKEN.NS', 'TITAN.NS', 'TORNTPHARM.NS', 'TORNTPOWER.NS', 'TRENT.NS', 'TRIDENT.NS', 'TRITURBINE.NS', 'TTKPRESTIG.NS', 'TTML.NS', 'TV18BRDCST.NS', 'TVSMOTOR.NS', 'UBL.NS', 'UCOBANK.NS', 'UFLEX.NS', 'UJJIVAN.NS', 'UJJIVANSFB.NS', 'ULTRACEMCO.NS', 'UNIONBANK.NS', 'UPL.NS', 'UTIAMC.NS', 'VAIBHAVGBL.NS', 'VAKRANGEE.NS', 'VALIANTORG.NS', 'VARROC.NS', 'VBL.NS', 'VEDL.NS', 'VENKEYS.NS', 'VGUARD.NS', 'VINATIORGA.NS', 'VIPIND.NS', 'VMART.NS', 'VOLTAS.NS', 'VTL.NS', 'WABCOINDIA.NS', 'WELCORP.NS', 'WELSPUNIND.NS', 'WESTLIFE.NS', 'WHIRLPOOL.NS', 'WIPRO.NS', 'WOCKPHARMA.NS', 'YESBANK.NS', 'ZEEL.NS', 'ZENSARTECH.NS', 'ZYDUSWELL.NS']\n\n\n# all_stocks_nifty = ['^NSEI', '20MICRONS.NS', '21STCENMGM.NS', '3IINFOLTD.NS', '3MINDIA.NS', '3PLAND.NS', '5PAISA.NS', '63MOONS.NS', 'A2ZINFRA.NS', 'AAKASH.NS', 'AAREYDRUGS.NS', 'AARON.NS', 'AARTIDRUGS.NS', 'AARTIIND.NS', 'AARTISURF.NS', 'AARVEEDEN.NS', 'AARVI.NS', 'AAVAS.NS', 'ABAN.NS', 'ABB.NS', 'ABBOTINDIA.NS', 'ABCAPITAL.NS', 'ABFRL.NS', 'ABMINTLLTD.NS', 'ABSLAMC.NS', 'ACC.NS', 'ACCELYA.NS', 'ACCURACY.NS', 'ACE.NS', 'ACRYSIL.NS', 'ADANIENT.NS', 'ADANIGREEN.NS', 'ADANIPORTS.NS', 'ADANIPOWER.NS', 'ADANITRANS.NS', 
'ADFFOODS.NS', 'ADL.NS', 'ADORWELD.NS', 'ADROITINFO.NS', 'ADSL.NS', 'ADVANIHOTR.NS', 'ADVENZYMES.NS', 'AEGISCHEM.NS', 'AFFLE.NS', 'AGARIND.NS', 'AGRITECH.NS', 'AGROPHOS.NS', 'AHLADA.NS', 'AHLEAST.NS', 'AHLUCONT.NS', 'AIAENG.NS', 'AIRAN.NS', 'AIROLAM.NS', 'AJANTPHARM.NS', 'AJMERA.NS', 'AJOONI.NS', 'AJRINFRA.NS', 'AKASH.NS', 'AKG.NS', 'AKSHARCHEM.NS', 'AKSHOPTFBR.NS', 'AKZOINDIA.NS', 'ALANKIT.NS', 'ALBERTDAVD.NS', 'ALEMBICLTD.NS', 'ALICON.NS', 'ALKALI.NS', 'ALKEM.NS', 'ALKYLAMINE.NS', 'ALLCARGO.NS', 'ALLSEC.NS', 'ALMONDZ.NS', 'ALOKINDS.NS', 'ALPA.NS', 'ALPHAGEO.NS', 'AMARAJABAT.NS', 'AMBER.NS', 'AMBICAAGAR.NS', 'AMBIKCO.NS', 'AMBUJACEM.NS', 'AMDIND.NS', 'AMIORG.NS', 'AMJLAND.NS', 'AMRUTANJAN.NS', 'ANANDRATHI.NS', 'ANANTRAJ.NS', 'ANDHRACEMT.NS', 'ANDHRAPAP.NS', 'ANDHRSUGAR.NS', 'ANDREWYU.NS', 'ANGELONE.NS', 'ANIKINDS.NS', 'ANKITMETAL.NS', 'ANMOL.NS', 'ANSALAPI.NS', 'ANSALHSG.NS', 'ANTGRAPHIC.NS', 'ANUP.NS', 'ANURAS.NS', 'APARINDS.NS', 'APCL.NS', 'APCOTEXIND.NS', 'APEX.NS', 'APLAPOLLO.NS', 'APLLTD.NS', 'APOLLO.NS', 'APOLLOHOSP.NS', 'APOLLOPIPE.NS', 'APOLLOTYRE.NS', 'APOLSINHOT.NS', 'APTECHT.NS', 'APTUS.NS', 'ARCHIDPLY.NS', 'ARCHIES.NS', 'ARENTERP.NS', 'ARIES.NS', 'ARIHANT.NS', 'ARIHANTCAP.NS', 'ARIHANTSUP.NS', 'ARMANFIN.NS', 'AROGRANITE.NS', 'ARROWGREEN.NS', 'ARSHIYA.NS', 'ARSSINFRA.NS', 'ARTEMISMED.NS', 'ARTNIRMAN.NS', 'ARVEE.NS', 'ARVIND.NS', 'ARVINDFASN.NS', 'ARVSMART.NS', 'ASAHIINDIA.NS', 'ASAHISONG.NS', 'ASAL.NS', 'ASALCBR.NS', 'ASHAPURMIN.NS', 'ASHIANA.NS', 'ASHIMASYN.NS', 'ASHOKA.NS', 'ASHOKLEY.NS', 'ASIANENE.NS', 'ASIANHOTNR.NS', 'ASIANPAINT.NS', 'ASIANTILES.NS', 'ASPINWALL.NS', 'ASTEC.NS', 'ASTERDM.NS', 'ASTRAL.NS', 'ASTRAMICRO.NS', 'ASTRAZEN.NS', 'ASTRON.NS', 'ATFL.NS', 'ATGL.NS', 'ATLANTA.NS', 'ATUL.NS', 'ATULAUTO.NS', 'AUBANK.NS', 'AURIONPRO.NS', 'AUROPHARMA.NS', 'AURUM.NS', 'AUSOMENT.NS', 'AUTOAXLES.NS', 'AUTOIND.NS', 'AVADHSUGAR.NS', 'AVANTIFEED.NS', 'AVTNPL.NS', 'AWHCL.NS', 'AXISBANK.NS', 'AXISCADES.NS', 'AYMSYNTEX.NS', 'BAFNAPH.NS', 'BAGFILMS.NS', 'BAJAJ-AUTO.NS', 'BAJAJCON.NS', 'BAJAJELEC.NS', 'BAJAJFINSV.NS', 'BAJAJHCARE.NS', 'BAJAJHIND.NS', 'BAJAJHLDNG.NS', 'BAJFINANCE.NS', 'BALAJITELE.NS', 'BALAMINES.NS', 'BALAXI.NS', 'BALKRISHNA.NS', 'BALKRISIND.NS', 'BALLARPUR.NS', 'BALMLAWRIE.NS', 'BALPHARMA.NS', 'BALRAMCHIN.NS', 'BANARBEADS.NS', 'BANARISUG.NS', 'BANCOINDIA.NS', 'BANDHANBNK.NS', 'BANG.NS', 'BANKA.NS', 'BANKBARODA.NS', 'BANKINDIA.NS', 'BANSWRAS.NS', 'BARBEQUE.NS', 'BARTRONICS.NS', 'BASF.NS', 'BASML.NS', 'BATAINDIA.NS', 'BAYERCROP.NS', 'BBL.NS', 'BBOX.NS', 'BBTC.NS', 'BCG.NS', 'BCLIND.NS', 'BCP.NS', 'BDL.NS', 'BEARDSELL.NS', 'BECTORFOOD.NS', 'BEDMUTHA.NS', 'BEL.NS', 'BEML.NS', 'BEPL.NS', 'BERGEPAINT.NS', 'BESTAGRO.NS', 'BFINVEST.NS', 'BFUTILITIE.NS', 'BGLOBAL.NS', 'BGRENERGY.NS', 'BHAGCHEM.NS', 'BHAGERIA.NS', 'BHAGYANGR.NS', 'BHAGYAPROP.NS', 'BHANDARI.NS', 'BHARATFORG.NS', 'BHARATGEAR.NS', 'BHARATRAS.NS', 'BHARATWIRE.NS', 'BHARTIARTL.NS', 'BHEL.NS', 'BIGBLOC.NS', 'BIL.NS', 'BINDALAGRO.NS', 'BIOCON.NS', 'BIOFILCHEM.NS', 'BIRLACABLE.NS', 'BIRLACORPN.NS', 'BIRLAMONEY.NS', 'BIRLATYRE.NS', 'BKMINDST.NS', 'BLBLIMITED.NS', 'BLISSGVS.NS', 'BLKASHYAP.NS', 'BLS.NS', 'BLUECHIP.NS', 'BLUECOAST.NS', 'BLUEDART.NS', 'BLUESTARCO.NS', 'BODALCHEM.NS', 'BOMDYEING.NS', 'BOROLTD.NS', 'BORORENEW.NS', 'BOSCHLTD.NS', 'BPCL.NS', 'BPL.NS', 'BRFL.NS', 'BRIGADE.NS', 'BRITANNIA.NS', 'BRNL.NS', 'BROOKS.NS', 'BSE.NS', 'BSHSL.NS', 'BSL.NS', 'BSOFT.NS', 'BURGERKING.NS', 'BURNPUR.NS', 'BUTTERFLY.NS', 'BVCL.NS', 'BYKE.NS', 'CADILAHC.NS', 'CALSOFT.NS', 'CAMLINFINE.NS', 'CAMS.NS', 'CANBK.NS', 'CANDC.NS', 
'CANFINHOME.NS', 'CANTABIL.NS', 'CAPACITE.NS', 'CAPLIPOINT.NS', 'CAPTRUST.NS', 'CARBORUNIV.NS', 'CAREERP.NS', 'CARERATING.NS', 'CARTRADE.NS', 'CASTROLIND.NS', 'CCCL.NS', 'CCHHL.NS', 'CCL.NS', 'CDSL.NS', 'CEATLTD.NS', 'CEBBCO.NS', 'CELEBRITY.NS', 'CENTENKA.NS', 'CENTEXT.NS', 'CENTRALBK.NS', 'CENTRUM.NS', 'CENTUM.NS', 'CENTURYPLY.NS', 'CENTURYTEX.NS', 'CERA.NS', 'CEREBRAINT.NS', 'CESC.NS', 'CGCL.NS', 'CGPOWER.NS', 'CHALET.NS', 'CHAMBLFERT.NS', 'CHEMBOND.NS', 'CHEMCON.NS', 'CHEMFAB.NS', 'CHEMPLASTS.NS', 'CHENNPETRO.NS', 'CHOLAFIN.NS', 'CHOLAHLDNG.NS', 'CIGNITITEC.NS', 'CINELINE.NS', 'CINEVISTA.NS', 'CIPLA.NS', 'CLEAN.NS', 'CLEDUCATE.NS', 'CLNINDIA.NS', 'CLSEL.NS', 'CMICABLES.NS', 'COALINDIA.NS', 'COASTCORP.NS', 'COCHINSHIP.NS', 'COFFEEDAY.NS', 'COFORGE.NS', 'COLPAL.NS', 'COMPINFO.NS', 'COMPUSOFT.NS', 'CONCOR.NS', 'CONFIPET.NS', 'CONSOFINVT.NS', 'CONTROLPR.NS', 'CORALFINAC.NS', 'CORDSCABLE.NS', 'COROMANDEL.NS', 'COSMOFILMS.NS', 'COUNCODOS.NS', 'COX&KINGS.NS', 'CRAFTSMAN.NS', 'CREATIVE.NS', 'CREATIVEYE.NS', 'CREDITACC.NS', 'CREST.NS', 'CRISIL.NS', 'CROMPTON.NS', 'CSBBANK.NS', 'CTE.NS', 'CUB.NS', 'CUBEXTUB.NS', 'CUMMINSIND.NS', 'CUPID.NS', 'CYBERMEDIA.NS', 'CYBERTECH.NS', 'CYIENT.NS', 'DAAWAT.NS', 'DABUR.NS', 'DALALSTCOM.NS', 'DALBHARAT.NS', 'DALMIASUG.NS', 'DAMODARIND.NS', 'DANGEE.NS', 'DATAMATICS.NS', 'DATAPATTNS.NS', 'DBCORP.NS', 'DBL.NS', 'DBREALTY.NS', 'DBSTOCKBRO.NS', 'DCAL.NS', 'DCBBANK.NS', 'DCM.NS', 'DCMFINSERV.NS', 'DCMNVL.NS', 'DCMSHRIRAM.NS', 'DCMSRIND.NS', 'DCW.NS', 'DECCANCE.NS', 'DEEPAKFERT.NS', 'DEEPAKNTR.NS', 'DEEPENR.NS', 'DEEPINDS.NS', 'DELPHIFX.NS', 'DELTACORP.NS', 'DELTAMAGNT.NS', 'DEN.NS', 'DENORA.NS', 'DEVYANI.NS', 'DFMFOODS.NS', 'DGCONTENT.NS', 'DHAMPURSUG.NS', 'DHANBANK.NS', 'DHANI.NS', 'DHANUKA.NS', 'DHARAMSI.NS', 'DHARSUGAR.NS', 'DHRUV.NS', 'DHUNINV.NS', 'DIAMONDYD.NS', 'DIAPOWER.NS', 'DICIND.NS', 'DIGISPICE.NS', 'DIGJAMLMTD.NS', 'DISHTV.NS', 'DIVISLAB.NS', 'DIXON.NS', 'DLF.NS', 'DLINKINDIA.NS', 'DMART.NS', 'DNAMEDIA.NS', 'DODLA.NS', 'DOLATALGO.NS', 'DOLLAR.NS', 'DONEAR.NS', 'DPABHUSHAN.NS', 'DPSCLTD.NS', 'DPWIRES.NS', 'DRCSYSTEMS.NS', 'DREDGECORP.NS', 'DRREDDY.NS', 'DSSL.NS', 'DTIL.NS', 'DUCON.NS', 'DVL.NS', 'DWARKESH.NS', 'DYNAMATECH.NS', 'DYNPRO.NS', 'EASEMYTRIP.NS', 'EASTSILK.NS', 'EASUNREYRL.NS', 'ECLERX.NS', 'EDELWEISS.NS', 'EDUCOMP.NS', 'EICHERMOT.NS', 'EIDPARRY.NS', 'EIFFL.NS', 'EIHAHOTELS.NS', 'EIHOTEL.NS', 'EIMCOELECO.NS', 'EKC.NS', 'ELECON.NS', 'ELECTCAST.NS', 'ELECTHERM.NS', 'ELGIEQUIP.NS', 'ELGIRUBCO.NS', 'EMAMILTD.NS', 'EMAMIPAP.NS', 'EMAMIREAL.NS', 'EMKAY.NS', 'EMMBI.NS', 'ENDURANCE.NS', 'ENERGYDEV.NS', 'ENGINERSIN.NS', 'ENIL.NS', 'EPL.NS', 'EQUIPPP.NS', 'EQUITAS.NS', 'EQUITASBNK.NS', 'ERIS.NS', 'EROSMEDIA.NS', 'ESABINDIA.NS', 'ESCORTS.NS', 'ESSARSHPNG.NS', 'ESTER.NS', 'EUROTEXIND.NS', 'EVEREADY.NS', 'EVERESTIND.NS', 'EXCEL.NS', 'EXCELINDUS.NS', 'EXIDEIND.NS', 'EXPLEOSOL.NS', 'EXXARO.NS', 'FACT.NS', 'FAIRCHEMOR.NS', 'FCL.NS', 'FCONSUMER.NS', 'FCSSOFT.NS', 'FDC.NS', 'FEDERALBNK.NS', 'FEL.NS', 'FELDVR.NS', 'FIEMIND.NS', 'FILATEX.NS', 'FINCABLES.NS', 'FINEORG.NS', 'FINOPB.NS', 'FINPIPE.NS', 'FLEXITUFF.NS', 'FLFL.NS', 'FLUOROCHEM.NS', 'FMGOETZE.NS', 'FMNL.NS', 'FOCUS.NS', 'FOODSIN.NS', 'FORCEMOT.NS', 'FORTIS.NS', 'FOSECOIND.NS', 'FRETAIL.NS', 'FSC.NS', 'FSL.NS', 'GABRIEL.NS', 'GAEL.NS', 'GAIL.NS', 'GAL.NS', 'GALAXYSURF.NS', 'GALLANTT.NS', 'GALLISPAT.NS', 'GANDHITUBE.NS', 'GANECOS.NS', 'GANESHBE.NS', 'GANESHHOUC.NS', 'GANGAFORGE.NS', 'GANGESSECU.NS', 'GANGOTRI.NS', 'GARFIBRES.NS', 'GATI.NS', 'GAYAHWS.NS', 'GAYAPROJ.NS', 'GDL.NS', 'GEECEE.NS', 'GEEKAYWIRE.NS', 
'GENCON.NS', 'GENESYS.NS', 'GENUSPAPER.NS', 'GENUSPOWER.NS', 'GEOJITFSL.NS', 'GEPIL.NS', 'GESHIP.NS', 'GET&D.NS', 'GFLLIMITED.NS', 'GFSTEELS.NS', 'GHCL.NS', 'GICHSGFIN.NS', 'GICRE.NS', 'GILLANDERS.NS', 'GILLETTE.NS', 'GINNIFILA.NS', 'GIPCL.NS', 'GISOLUTION.NS', 'GKWLIMITED.NS', 'GLAND.NS', 'GLAXO.NS', 'GLENMARK.NS', 'GLFL.NS', 'GLOBAL.NS', 'GLOBALVECT.NS', 'GLOBE.NS', 'GLOBUSSPR.NS', 'GLS.NS', 'GMBREW.NS', 'GMDCLTD.NS', 'GMMPFAUDLR.NS', 'GMRINFRA.NS', 'GNA.NS', 'GNFC.NS', 'GOACARBON.NS', 'GOCLCORP.NS', 'GOCOLORS.NS', 'GODFRYPHLP.NS', 'GODHA.NS', 'GODREJAGRO.NS', 'GODREJCP.NS', 'GODREJIND.NS', 'GODREJPROP.NS', 'GOENKA.NS', 'GOKEX.NS', 'GOKUL.NS', 'GOKULAGRO.NS', 'GOLDENTOBC.NS', 'GOLDIAM.NS', 'GOLDTECH.NS', 'GOODLUCK.NS', 'GOODYEAR.NS', 'GPIL.NS', 'GPPL.NS', 'GPTINFRA.NS', 'GRANULES.NS', 'GRAPHITE.NS', 'GRASIM.NS', 'GRAUWEIL.NS', 'GRAVITA.NS', 'GREAVESCOT.NS', 'GREENLAM.NS', 'GREENPANEL.NS', 'GREENPLY.NS', 'GREENPOWER.NS', 'GRINDWELL.NS', 'GRINFRA.NS', 'GROBTEA.NS', 'GRPLTD.NS', 'GRSE.NS', 'GSCLCEMENT.NS', 'GSFC.NS', 'GSPL.NS', 'GSS.NS', 'GTL.NS', 'GTLINFRA.NS', 'GTNTEX.NS', 'GTPL.NS', 'GUFICBIO.NS', 'GUJALKALI.NS', 'GUJAPOLLO.NS', 'GUJGASLTD.NS', 'GUJRAFFIA.NS', 'GULFOILLUB.NS', 'GULFPETRO.NS', 'GULPOLY.NS', 'HAL.NS', 'HAPPSTMNDS.NS', 'HARRMALAYA.NS', 'HATHWAY.NS', 'HATSUN.NS', 'HAVELLS.NS', 'HAVISHA.NS', 'HBLPOWER.NS', 'HBSL.NS', 'HCC.NS', 'HCG.NS', 'HCL-INSYS.NS', 'HCLTECH.NS', 'HDFC.NS', 'HDFCAMC.NS', 'HDFCBANK.NS', 'HDFCLIFE.NS', 'HDIL.NS', 'HEG.NS', 'HEIDELBERG.NS', 'HEMIPROP.NS', 'HERANBA.NS', 'HERCULES.NS', 'HERITGFOOD.NS', 'HEROMOTOCO.NS', 'HESTERBIO.NS', 'HEXATRADEX.NS', 'HFCL.NS', 'HGINFRA.NS', 'HGS.NS', 'HIKAL.NS', 'HIL.NS', 'HILTON.NS', 'HIMATSEIDE.NS', 'HINDALCO.NS', 'HINDCOMPOS.NS', 'HINDCON.NS', 'HINDCOPPER.NS', 'HINDMOTORS.NS', 'HINDNATGLS.NS', 'HINDOILEXP.NS', 'HINDPETRO.NS', 'HINDUNILVR.NS', 'HINDZINC.NS', 'HIRECT.NS', 'HISARMETAL.NS', 'HITECH.NS', 'HITECHCORP.NS', 'HITECHGEAR.NS', 'HLEGLAS.NS', 'HLVLTD.NS', 'HMT.NS', 'HMVL.NS', 'HNDFDS.NS', 'HOMEFIRST.NS', 'HONAUT.NS', 'HONDAPOWER.NS', 'HOTELRUGBY.NS', 'HOVS.NS', 'HPAL.NS', 'HPL.NS', 'HSCL.NS', 'HSIL.NS', 'HTMEDIA.NS', 'HUBTOWN.NS', 'HUDCO.NS', 'HUHTAMAKI.NS', 'IBREALEST.NS', 'IBULHSGFIN.NS', 'ICDSLTD.NS', 'ICEMAKE.NS', 'ICICIBANK.NS', 'ICICIGI.NS', 'ICICIPRULI.NS', 'ICIL.NS', 'ICRA.NS', 'IDBI.NS', 'IDEA.NS', 'IDFC.NS', 'IDFCFIRSTB.NS', 'IEX.NS', 'IFBAGRO.NS', 'IFBIND.NS', 'IFCI.NS', 'IFGLEXPOR.NS', 'IGARASHI.NS', 'IGL.NS', 'IGPL.NS', 'IIFL.NS', 'IIFLSEC.NS', 'IIFLWAM.NS', 'IITL.NS', 'IL&FSENGG.NS', 'IL&FSTRANS.NS', 'IMAGICAA.NS', 'IMFA.NS', 'IMPAL.NS', 'IMPEXFERRO.NS', 'INCREDIBLE.NS', 'INDBANK.NS', 'INDHOTEL.NS', 'INDIACEM.NS', 'INDIAGLYCO.NS', 'INDIAMART.NS', 'INDIANB.NS', 'INDIANCARD.NS', 'INDIANHUME.NS', 'INDIGO.NS', 'INDIGOPNTS.NS', 'INDLMETER.NS', 'INDNIPPON.NS', 'INDOCO.NS', 'INDORAMA.NS', 'INDOSOLAR.NS', 'INDOSTAR.NS', 'INDOTECH.NS', 'INDOTHAI.NS', 'INDOWIND.NS', 'INDRAMEDCO.NS', 'INDSWFTLAB.NS', 'INDSWFTLTD.NS', 'INDTERRAIN.NS', 'INDUSINDBK.NS', 'INDUSTOWER.NS', 'INEOSSTYRO.NS', 'INFIBEAM.NS', 'INFOBEAN.NS', 'INFOMEDIA.NS', 'INFY.NS', 'INGERRAND.NS', 'INOXLEISUR.NS', 'INOXWIND.NS', 'INSECTICID.NS', 'INSPIRISYS.NS', 'INTEGRA.NS', 'INTELLECT.NS', 'INTENTECH.NS', 'INTLCONV.NS', 'INVENTURE.NS', 'IOB.NS', 'IOC.NS', 'IOLCP.NS', 'IPCALAB.NS', 'IPL.NS', 'IRB.NS', 'IRCON.NS', 'IRCTC.NS', 'IRFC.NS', 'IRIS.NS', 'IRISDOREME.NS', 'ISEC.NS', 'ISFT.NS', 'ISGEC.NS', 'ISMTLTD.NS', 'ITC.NS', 'ITDC.NS', 'ITDCEM.NS', 'ITI.NS', 'IVC.NS', 'IVP.NS', 'IWEL.NS', 'IZMO.NS', 'J&KBANK.NS', 'JAGRAN.NS', 'JAGSNPHARM.NS', 'JAIBALAJI.NS', 
'JAICORPLTD.NS', 'JAINSTUDIO.NS', 'JAIPURKURT.NS', 'JAMNAAUTO.NS', 'JASH.NS', 'JAYAGROGN.NS', 'JAYBARMARU.NS', 'JAYNECOIND.NS', 'JAYSREETEA.NS', 'JBCHEPHARM.NS', 'JBFIND.NS', 'JBMA.NS', 'JCHAC.NS', 'JETAIRWAYS.NS', 'JETFREIGHT.NS', 'JHS.NS', 'JIKIND.NS', 'JINDALPHOT.NS', 'JINDALPOLY.NS', 'JINDALSAW.NS', 'JINDALSTEL.NS', 'JINDCOT.NS', 'JINDRILL.NS', 'JINDWORLD.NS', 'JISLDVREQS.NS', 'JISLJALEQS.NS', 'JITFINFRA.NS', 'JIYAECO.NS', 'JKCEMENT.NS', 'JKIL.NS', 'JKLAKSHMI.NS', 'JKPAPER.NS', 'JKTYRE.NS', 'JMA.NS', 'JMCPROJECT.NS', 'JMFINANCIL.NS', 'JOCIL.NS', 'JPASSOCIAT.NS', 'JPINFRATEC.NS', 'JPOLYINVST.NS', 'JPPOWER.NS', 'JSL.NS', 'JSLHISAR.NS', 'JSWENERGY.NS', 'JSWHL.NS', 'JSWISPL.NS', 'JSWSTEEL.NS', 'JTEKTINDIA.NS', 'JTLINFRA.NS', 'JUBLFOOD.NS', 'JUBLINDS.NS', 'JUBLINGREA.NS', 'JUBLPHARMA.NS', 'JUSTDIAL.NS', 'JYOTHYLAB.NS', 'JYOTISTRUC.NS', 'KABRAEXTRU.NS', 'KAJARIACER.NS', 'KAKATCEM.NS', 'KALPATPOWR.NS', 'KALYANI.NS', 'KALYANIFRG.NS', 'KALYANKJIL.NS', 'KAMATHOTEL.NS', 'KAMDHENU.NS', 'KANANIIND.NS', 'KANORICHEM.NS', 'KANPRPLA.NS', 'KANSAINER.NS', 'KAPSTON.NS', 'KARMAENG.NS', 'KARURVYSYA.NS', 'KAUSHALYA.NS', 'KAVVERITEL.NS', 'KAYA.NS', 'KBCGLOBAL.NS', 'KCP.NS', 'KCPSUGIND.NS', 'KDDL.NS', 'KEC.NS', 'KECL.NS', 'KEERTI.NS', 'KEI.NS', 'KELLTONTEC.NS', 'KENNAMET.NS', 'KERNEX.NS', 'KESORAMIND.NS', 'KEYFINSERV.NS', 'KHADIM.NS', 'KHAICHEM.NS', 'KHAITANLTD.NS', 'KHANDSE.NS', 'KICL.NS', 'KILITCH.NS', 'KIMS.NS', 'KINGFA.NS', 'KIOCL.NS', 'KIRIINDUS.NS', 'KIRLFER.NS', 'KIRLOSBROS.NS', 'KIRLOSENG.NS', 'KIRLOSIND.NS', 'KITEX.NS', 'KKCL.NS', 'KMSUGAR.NS', 'KNRCON.NS', 'KOKUYOCMLN.NS', 'KOLTEPATIL.NS', 'KOPRAN.NS', 'KOTAKBANK.NS', 'KOTARISUG.NS', 'KOTHARIPET.NS', 'KOTHARIPRO.NS', 'KOVAI.NS', 'KPIGLOBAL.NS', 'KPITTECH.NS', 'KPRMILL.NS', 'KRBL.NS', 'KREBSBIO.NS', 'KRIDHANINF.NS', 'KRISHANA.NS', 'KRITI.NS', 'KRSNAA.NS', 'KSB.NS', 'KSCL.NS', 'KSL.NS', 'KTKBANK.NS', 'KUANTUM.NS', 'L&TFH.NS', 'LAGNAM.NS', 'LAKPRE.NS', 'LALPATHLAB.NS', 'LAMBODHARA.NS', 'LAOPALA.NS', 'LASA.NS', 'LATENTVIEW.NS', 'LAURUSLABS.NS', 'LAXMICOT.NS', 'LAXMIMACH.NS', 'LCCINFOTEC.NS', 'LEMONTREE.NS', 'LFIC.NS', 'LGBBROSLTD.NS', 'LGBFORGE.NS', 'LIBAS.NS', 'LIBERTSHOE.NS', 'LICHSGFIN.NS', 'LIKHITHA.NS', 'LINC.NS', 'LINCOLN.NS', 'LINDEINDIA.NS', 'LODHA.NS', 'LOKESHMACH.NS', 'LOTUSEYE.NS', 'LOVABLE.NS', 'LPDC.NS', 'LSIL.NS', 'LT.NS', 'LTI.NS', 'LTTS.NS', 'LUMAXIND.NS', 'LUMAXTECH.NS', 'LUPIN.NS', 'LUXIND.NS', 'LXCHEM.NS', 'LYKALABS.NS', 'LYPSAGEMS.NS', 'M&M.NS', 'M&MFIN.NS', 'MAANALU.NS', 'MACPOWER.NS', 'MADHAV.NS', 'MADHUCON.NS', 'MADRASFERT.NS', 'MAGADSUGAR.NS', 'MAGNUM.NS', 'MAHABANK.NS', 'MAHAPEXLTD.NS', 'MAHASTEEL.NS', 'MAHEPC.NS', 'MAHESHWARI.NS', 'MAHINDCIE.NS', 'MAHLIFE.NS', 'MAHLOG.NS', 'MAHSCOOTER.NS', 'MAHSEAMLES.NS', 'MAITHANALL.NS', 'MALUPAPER.NS', 'MANAKALUCO.NS', 'MANAKCOAT.NS', 'MANAKSIA.NS', 'MANAKSTEEL.NS', 'MANALIPETC.NS', 'MANAPPURAM.NS', 'MANGALAM.NS', 'MANGCHEFER.NS', 'MANGLMCEM.NS', 'MANGTIMBER.NS', 'MANINDS.NS', 'MANINFRA.NS', 'MANORG.NS', 'MANUGRAPH.NS', 'MAPMYINDIA.NS', 'MARALOVER.NS', 'MARATHON.NS', 'MARICO.NS', 'MARINE.NS', 'MARKSANS.NS', 'MARSHALL.NS', 'MARUTI.NS', 'MASFIN.NS', 'MASKINVEST.NS', 'MASTEK.NS', 'MATRIMONY.NS', 'MAWANASUG.NS', 'MAXHEALTH.NS', 'MAXIND.NS', 'MAXVIL.NS', 'MAYURUNIQ.NS', 'MAZDA.NS', 'MAZDOCK.NS', 'MBAPL.NS', 'MBECL.NS', 'MBLINFRA.NS', 'MCDHOLDING.NS', 'MCDOWELL-N.NS', 'MCL.NS', 'MCLEODRUSS.NS', 'MCX.NS', 'MEDICAMEQ.NS', 'MEDPLUS.NS', 'MEGASOFT.NS', 'MELSTAR.NS', 'MENONBE.NS', 'MEP.NS', 'MERCATOR.NS', 'METALFORGE.NS', 'METROBRAND.NS', 'METROPOLIS.NS', 'MFL.NS', 'MFSL.NS', 'MGEL.NS', 'MGL.NS', 
'MHRIL.NS', 'MICEL.NS', 'MIDHANI.NS', 'MINDACORP.NS', 'MINDAIND.NS', 'MINDTECK.NS', 'MINDTREE.NS', 'MIRCELECTR.NS', 'MIRZAINT.NS', 'MITTAL.NS', 'MMFL.NS', 'MMP.NS', 'MMTC.NS', 'MODIRUBBER.NS', 'MODISNME.NS', 'MOHITIND.NS', 'MOHOTAIND.NS', 'MOIL.NS', 'MOKSH.NS', 'MOL.NS', 'MOLDTECH.NS', 'MOLDTKPAC.NS', 'MONTECARLO.NS', 'MORARJEE.NS', 'MOREPENLAB.NS', 'MOTHERSUMI.NS', 'MOTILALOFS.NS', 'MOTOGENFIN.NS', 'MPHASIS.NS', 'MPSLTD.NS', 'MRF.NS', 'MRO-TEK.NS', 'MRPL.NS', 'MSPL.NS', 'MSTCLTD.NS', 'MTARTECH.NS', 'MTEDUCARE.NS', 'MTNL.NS', 'MUKANDENGG.NS', 'MUKANDLTD.NS', 'MUKTAARTS.NS', 'MUNJALAU.NS', 'MUNJALSHOW.NS', 'MURUDCERA.NS', 'MUTHOOTCAP.NS', 'MUTHOOTFIN.NS', 'NACLIND.NS', 'NAGAFERT.NS', 'NAGREEKCAP.NS', 'NAGREEKEXP.NS', 'NAHARCAP.NS', 'NAHARINDUS.NS', 'NAHARPOLY.NS', 'NAHARSPING.NS', 'NAM-INDIA.NS', 'NATCOPHARM.NS', 'NATHBIOGEN.NS', 'NATIONALUM.NS', 'NATNLSTEEL.NS', 'NAUKRI.NS', 'NAVINFLUOR.NS', 'NAVKARCORP.NS', 'NAVNETEDUL.NS', 'NAZARA.NS', 'NBCC.NS', 'NBIFIN.NS', 'NBVENTURES.NS', 'NCC.NS', 'NCLIND.NS', 'NDGL.NS', 'NDL.NS', 'NDRAUTO.NS', 'NDTV.NS', 'NECCLTD.NS', 'NECLIFE.NS', 'NELCAST.NS', 'NELCO.NS', 'NEOGEN.NS', 'NESCO.NS', 'NESTLEIND.NS', 'NETWORK18.NS', 'NEULANDLAB.NS', 'NEWGEN.NS', 'NEXTMEDIA.NS', 'NFL.NS', 'NGIL.NS', 'NH.NS', 'NHPC.NS', 'NIACL.NS', 'NIBL.NS', 'NIITLTD.NS', 'NILAINFRA.NS', 'NILASPACES.NS', 'NILKAMAL.NS', 'NIPPOBATRY.NS', 'NIRAJ.NS', 'NIRAJISPAT.NS', 'NITCO.NS', 'NITINFIRE.NS', 'NITINSPIN.NS', 'NITIRAJ.NS', 'NKIND.NS', 'NLCINDIA.NS', 'NMDC.NS', 'NOCIL.NS', 'NOIDATOLL.NS', 'NORBTEAEXP.NS', 'NOVARTIND.NS', 'NRAIL.NS', 'NRBBEARING.NS', 'NSIL.NS', 'NTL.NS', 'NTPC.NS', 'NUCLEUS.NS', 'NURECA.NS', 'NUVOCO.NS', 'NXTDIGITAL.NS', 'NYKAA.NS', 'OAL.NS', 'OBEROIRLTY.NS', 'OCCL.NS', 'OFSS.NS', 'OIL.NS', 'OILCOUNTUB.NS', 'OLECTRA.NS', 'OMAXAUTO.NS', 'OMAXE.NS', 'OMINFRAL.NS', 'OMKARCHEM.NS', 'ONELIFECAP.NS', 'ONEPOINT.NS', 'ONGC.NS', 'ONMOBILE.NS', 'ONWARDTEC.NS', 'OPTIEMUS.NS', 'OPTOCIRCUI.NS', 'ORBTEXP.NS', 'ORCHPHARMA.NS', 'ORICONENT.NS', 'ORIENTABRA.NS', 'ORIENTALTL.NS', 'ORIENTBELL.NS', 'ORIENTCEM.NS', 'ORIENTELEC.NS', 'ORIENTHOT.NS', 'ORIENTLTD.NS', 'ORIENTPPR.NS', 'ORISSAMINE.NS', 'ORTEL.NS', 'ORTINLAB.NS', 'OSWALAGRO.NS', 'PAEL.NS', 'PAGEIND.NS', 'PAISALO.NS', 'PALASHSECU.NS', 'PALREDTEC.NS', 'PANACEABIO.NS', 'PANACHE.NS', 'PANAMAPET.NS', 'PANSARI.NS', 'PAR.NS', 'PARACABLES.NS', 'PARAGMILK.NS', 'PARAS.NS', 'PARSVNATH.NS', 'PASUPTAC.NS', 'PATELENG.NS', 'PATINTLOG.NS', 'PATINTPP.NS', 'PATSPINLTD.NS', 'PAYTM.NS', 'PBAINFRA.NS', 'PCJEWELLER.NS', 'PDMJEPAPER.NS', 'PDPL.NS', 'PDSMFL.NS', 'PEARLPOLY.NS', 'PEL.NS', 'PENIND.NS', 'PENINLAND.NS', 'PERSISTENT.NS', 'PETRONET.NS', 'PFC.NS', 'PFIZER.NS', 'PFOCUS.NS', 'PFS.NS', 'PGEL.NS', 'PGHH.NS', 'PGHL.NS', 'PGIL.NS', 'PHILIPCARB.NS', 'PHOENIXLTD.NS', 'PIDILITIND.NS', 'PIIND.NS', 'PILANIINVS.NS', 'PILITA.NS', 'PIONDIST.NS', 'PIONEEREMB.NS', 'PITTIENG.NS', 'PKTEA.NS', 'PLASTIBLEN.NS', 'PNB.NS', 'PNBGILTS.NS', 'PNBHOUSING.NS', 'PNC.NS', 'PNCINFRA.NS', 'PODDARHOUS.NS', 'PODDARMENT.NS', 'POKARNA.NS', 'POLICYBZR.NS', 'POLYCAB.NS', 'POLYMED.NS', 'POLYPLEX.NS', 'PONNIERODE.NS', 'POONAWALLA.NS', 'POWERGRID.NS', 'POWERINDIA.NS', 'POWERMECH.NS', 'PPAP.NS', 'PPL.NS', 'PRAENG.NS', 'PRAJIND.NS', 'PRAKASH.NS', 'PRAKASHSTL.NS', 'PRAXIS.NS', 'PRECAM.NS', 'PRECOT.NS', 'PRECWIRE.NS', 'PREMEXPLN.NS', 'PREMIER.NS', 'PREMIERPOL.NS', 'PRESSMN.NS', 'PRESTIGE.NS', 'PRICOLLTD.NS', 'PRIMESECU.NS', 'PRINCEPIPE.NS', 'PRITIKAUTO.NS', 'PRIVISCL.NS', 'PROZONINTU.NS', 'PRSMJOHNSN.NS', 'PSB.NS', 'PSPPROJECT.NS', 'PTC.NS', 'PTL.NS', 'PUNJABCHEM.NS', 'PUNJLLOYD.NS', 
'PURVA.NS', 'PVP.NS', 'PVR.NS', 'QUESS.NS', 'QUICKHEAL.NS', 'QUINTEGRA.NS', 'RADAAN.NS', 'RADICO.NS', 'RADIOCITY.NS', 'RAILTEL.NS', 'RAIN.NS', 'RAJESHEXPO.NS', 'RAJMET.NS', 'RAJRATAN.NS', 'RAJSREESUG.NS', 'RAJTV.NS', 'RAJVIR.NS', 'RALLIS.NS', 'RAMANEWS.NS', 'RAMASTEEL.NS', 'RAMCOCEM.NS', 'RAMCOIND.NS', 'RAMCOSYS.NS', 'RAMKY.NS', 'RANASUG.NS', 'RANEENGINE.NS', 'RANEHOLDIN.NS', 'RATEGAIN.NS', 'RATNAMANI.NS', 'RAYMOND.NS', 'RBL.NS', 'RBLBANK.NS', 'RCF.NS', 'RCOM.NS', 'RECLTD.NS', 'REDINGTON.NS', 'REFEX.NS', 'REGENCERAM.NS', 'RELAXO.NS', 'RELCAPITAL.NS', 'RELIANCE.NS', 'RELIGARE.NS', 'RELINFRA.NS', 'REMSONSIND.NS', 'RENUKA.NS', 'REPCOHOME.NS', 'REPL.NS', 'REPRO.NS', 'RESPONIND.NS', 'REVATHI.NS', 'RGL.NS', 'RHFL.NS', 'RHIM.NS', 'RICOAUTO.NS', 'RIIL.NS', 'RITES.NS', 'RKDL.NS', 'RKEC.NS', 'RKFORGE.NS', 'RMCL.NS', 'RML.NS', 'RNAVAL.NS', 'ROHITFERRO.NS', 'ROHLTD.NS', 'ROLEXRINGS.NS', 'ROLLT.NS', 'ROLTA.NS', 'ROML.NS', 'ROSSARI.NS', 'ROSSELLIND.NS', 'ROUTE.NS', 'RPGLIFE.NS', 'RPOWER.NS', 'RPPINFRA.NS', 'RPPL.NS', 'RPSGVENT.NS', 'RSSOFTWARE.NS', 'RSWM.NS', 'RSYSTEMS.NS', 'RTNINDIA.NS', 'RTNPOWER.NS', 'RUBYMILLS.NS', 'RUCHI.NS', 'RUCHINFRA.NS', 'RUCHIRA.NS', 'RUPA.NS', 'RUSHIL.NS', 'RVHL.NS', 'RVNL.NS', 'S&SPOWER.NS', 'SABEVENTS.NS', 'SABTN.NS', 'SADBHAV.NS', 'SADBHIN.NS', 'SAFARI.NS', 'SAGARDEEP.NS', 'SAGCEM.NS', 'SAIL.NS', 'SAKAR.NS', 'SAKHTISUG.NS', 'SAKSOFT.NS', 'SAKUMA.NS', 'SALASAR.NS', 'SALONA.NS', 'SALSTEEL.NS', 'SALZERELEC.NS', 'SAMBHAAV.NS', 'SANCO.NS', 'SANDESH.NS', 'SANDHAR.NS', 'SANGAMIND.NS', 'SANGHIIND.NS', 'SANGHVIMOV.NS', 'SANGINITA.NS', 'SANOFI.NS', 'SANSERA.NS', 'SANWARIA.NS', 'SAPPHIRE.NS', 'SARDAEN.NS', 'SAREGAMA.NS', 'SARLAPOLY.NS', 'SASKEN.NS', 'SASTASUNDR.NS', 'SATHAISPAT.NS', 'SATIA.NS', 'SATIN.NS', 'SBC.NS', 'SBCL.NS', 'SBICARD.NS', 'SBILIFE.NS', 'SBIN.NS', 'SCAPDVR.NS', 'SCHAEFFLER.NS', 'SCHAND.NS', 'SCHNEIDER.NS', 'SCI.NS', 'SDBL.NS', 'SEAMECLTD.NS', 'SECURKLOUD.NS', 'SEJALLTD.NS', 'SELAN.NS', 'SELMC.NS', 'SEPOWER.NS', 'SEQUENT.NS', 'SERVOTECH.NS', 'SESHAPAPER.NS', 'SETCO.NS', 'SETUINFRA.NS', 'SEYAIND.NS', 'SFL.NS', 'SGIL.NS', 'SGL.NS', 'SHAHALLOYS.NS', 'SHAKTIPUMP.NS', 'SHALBY.NS', 'SHALPAINTS.NS', 'SHANKARA.NS', 'SHANTI.NS', 'SHANTIGEAR.NS', 'SHARDACROP.NS', 'SHARDAMOTR.NS', 'SHAREINDIA.NS', 'SHEMAROO.NS', 'SHIL.NS', 'SHILPAMED.NS', 'SHIVALIK.NS', 'SHIVAMAUTO.NS', 'SHIVAMILLS.NS', 'SHIVATEX.NS', 'SHK.NS', 'SHOPERSTOP.NS', 'SHRADHA.NS', 'SHREDIGCEM.NS', 'SHREECEM.NS', 'SHREEPUSHK.NS', 'SHREERAMA.NS', 'SHRENIK.NS', 'SHREYANIND.NS', 'SHREYAS.NS', 'SHRIPISTON.NS', 'SHRIRAMCIT.NS', 'SHRIRAMEPC.NS', 'SHRIRAMPPS.NS', 'SHYAMCENT.NS', 'SHYAMMETL.NS', 'SHYAMTEL.NS', 'SICAL.NS', 'SIEMENS.NS', 'SIGACHI.NS', 'SIGIND.NS', 'SIKKO.NS', 'SIL.NS', 'SILGO.NS', 'SILINV.NS', 'SILLYMONKS.NS', 'SIMBHALS.NS', 'SIMPLEXINF.NS', 'SINTERCOM.NS', 'SINTEX.NS', 'SIRCA.NS', 'SIS.NS', 'SITINET.NS', 'SIYSIL.NS', 'SJS.NS', 'SJVN.NS', 'SKFINDIA.NS', 'SKIL.NS', 'SKIPPER.NS', 'SKMEGGPROD.NS', 'SMARTLINK.NS', 'SMCGLOBAL.NS', 'SMLISUZU.NS', 'SMLT.NS', 'SMSLIFE.NS', 'SMSPHARMA.NS', 'SNOWMAN.NS', 'SOBHA.NS', 'SOLARA.NS', 'SOLARINDS.NS', 'SOMANYCERA.NS', 'SOMATEX.NS', 'SOMICONVEY.NS', 'SONACOMS.NS', 'SONATSOFTW.NS', 'SORILINFRA.NS', 'SOTL.NS', 'SOUTHBANK.NS', 'SOUTHWEST.NS', 'SPAL.NS', 'SPANDANA.NS', 'SPARC.NS', 'SPCENET.NS', 'SPECIALITY.NS', 'SPENCERS.NS', 'SPENTEX.NS', 'SPIC.NS', 'SPICEJET.NS', 'SPLIL.NS', 'SPMLINFRA.NS', 'SPTL.NS', 'SPYL.NS', 'SREEL.NS', 'SREINFRA.NS', 'SRF.NS', 'SRHHYPOLTD.NS', 'SRIPIPES.NS', 'SRPL.NS', 'SRTRANSFIN.NS', 'SSWL.NS', 'STAMPEDE.NS', 'STAR.NS', 'STARCEMENT.NS', 'STARHEALTH.NS', 
'STARPAPER.NS', 'STCINDIA.NS', 'STEELCAS.NS', 'STEELCITY.NS', 'STEELXIND.NS', 'STEL.NS', 'STERTOOLS.NS', 'STLTECH.NS', 'STOVEKRAFT.NS', 'STYLAMIND.NS', 'SUBCAPCITY.NS', 'SUBEXLTD.NS', 'SUBROS.NS', 'SUDARSCHEM.NS', 'SUMEETINDS.NS', 'SUMICHEM.NS', 'SUMIT.NS', 'SUMMITSEC.NS', 'SUNCLAYLTD.NS', 'SUNDARAM.NS', 'SUNDARMFIN.NS', 'SUNDARMHLD.NS', 'SUNDRMBRAK.NS', 'SUNDRMFAST.NS', 'SUNFLAG.NS', 'SUNPHARMA.NS', 'SUNTECK.NS', 'SUNTV.NS', 'SUPERHOUSE.NS', 'SUPERSPIN.NS', 'SUPPETRO.NS', 'SUPRAJIT.NS', 'SUPREMEENG.NS', 'SUPREMEIND.NS', 'SUPREMEINF.NS', 'SURANASOL.NS', 'SURANAT&P.NS', 'SURYALAXMI.NS', 'SURYAROSNI.NS', 'SURYODAY.NS', 'SUTLEJTEX.NS', 'SUULD.NS', 'SUVEN.NS', 'SUVENPHAR.NS', 'SUVIDHAA.NS', 'SUZLON.NS', 'SVPGLOB.NS', 'SWANENERGY.NS', 'SWARAJENG.NS', 'SWELECTES.NS', 'SWSOLAR.NS', 'SYMPHONY.NS', 'SYNGENE.NS', 'TAINWALCHM.NS', 'TAJGVK.NS', 'TAKE.NS', 'TALBROAUTO.NS', 'TANLA.NS', 'TANTIACONS.NS', 'TARAPUR.NS', 'TARC.NS', 'TARMAT.NS', 'TARSONS.NS', 'TASTYBITE.NS', 'TATACHEM.NS', 'TATACOFFEE.NS', 'TATACOMM.NS', 'TATACONSUM.NS', 'TATAELXSI.NS', 'TATAINVEST.NS', 'TATAMETALI.NS', 'TATAMOTORS.NS', 'TATAMTRDVR.NS', 'TATAPOWER.NS', 'TATASTEEL.NS', 'TATASTLLP.NS', 'TATVA.NS', 'TBZ.NS', 'TCI.NS', 'TCIDEVELOP.NS', 'TCIEXP.NS', 'TCIFINANCE.NS', 'TCNSBRANDS.NS', 'TCPLPACK.NS', 'TCS.NS', 'TDPOWERSYS.NS', 'TEAMLEASE.NS', 'TECHIN.NS', 'TECHM.NS', 'TECHNOE.NS', 'TEGA.NS', 'TEJASNET.NS', 'TEMBO.NS', 'TERASOFT.NS', 'TEXINFRA.NS', 'TEXMOPIPES.NS', 'TEXRAIL.NS', 'TFCILTD.NS', 'TFL.NS', 'TGBHOTELS.NS', 'THANGAMAYL.NS', 'THEINVEST.NS', 'THEMISMED.NS', 'THERMAX.NS', 'THOMASCOOK.NS', 'THOMASCOTT.NS', 'THYROCARE.NS', 'TI.NS', 'TIDEWATER.NS', 'TIIL.NS', 'TIINDIA.NS', 'TIJARIA.NS', 'TIL.NS', 'TIMESGTY.NS', 'TIMETECHNO.NS', 'TIMKEN.NS', 'TINPLATE.NS', 'TIPSINDLTD.NS', 'TIRUMALCHM.NS', 'TIRUPATIFL.NS', 'TITAN.NS', 'TMRVL.NS', 'TNPETRO.NS', 'TNPL.NS', 'TNTELE.NS', 'TOKYOPLAST.NS', 'TORNTPHARM.NS', 'TORNTPOWER.NS', 'TOTAL.NS', 'TOUCHWOOD.NS', 'TPLPLASTEH.NS', 'TREEHOUSE.NS', 'TREJHARA.NS', 'TRENT.NS', 'TRF.NS', 'TRIDENT.NS', 'TRIGYN.NS', 'TRIL.NS', 'TRITURBINE.NS', 'TRIVENI.NS', 'TTKHLTCARE.NS', 'TTKPRESTIG.NS', 'TTL.NS', 'TTML.NS', 'TV18BRDCST.NS', 'TVSELECT.NS', 'TVSMOTOR.NS', 'TVSSRICHAK.NS', 'TVTODAY.NS', 'TVVISION.NS', 'TWL.NS', 'UBL.NS', 'UCALFUEL.NS', 'UCOBANK.NS', 'UDAICEMENT.NS', 'UFLEX.NS', 'UFO.NS', 'UGARSUGAR.NS', 'UGROCAP.NS', 'UJAAS.NS', 'UJJIVAN.NS', 'UJJIVANSFB.NS', 'ULTRACEMCO.NS', 'UMANGDAIRY.NS', 'UMESLTD.NS', 'UNICHEMLAB.NS', 'UNIDT.NS', 'UNIENTER.NS', 'UNIONBANK.NS', 'UNITECH.NS', 'UNITEDTEA.NS', 'UNIVASTU.NS', 'UNIVCABLES.NS', 'UNIVPHOTO.NS', 'UPL.NS', 'URJA.NS', 'USHAMART.NS', 'UTIAMC.NS', 'UTTAMSTL.NS', 'UTTAMSUGAR.NS', 'V2RETAIL.NS', 'VADILALIND.NS', 'VAIBHAVGBL.NS', 'VAISHALI.NS', 'VAKRANGEE.NS', 'VALIANTORG.NS', 'VARDHACRLC.NS', 'VARDMNPOLY.NS', 'VARROC.NS', 'VASCONEQ.NS', 'VASWANI.NS', 'VBL.NS', 'VEDL.NS', 'VENKEYS.NS', 'VENUSREM.NS', 'VERTOZ.NS', 'VESUVIUS.NS', 'VETO.NS', 'VGUARD.NS', 'VHL.NS', 'VICEROY.NS', 'VIDHIING.NS', 'VIJAYA.NS', 'VIJIFIN.NS', 'VIKASECO.NS', 'VIKASLIFE.NS', 'VIKASPROP.NS', 'VIKASWSP.NS', 'VIMTALABS.NS', 'VINATIORGA.NS', 'VINDHYATEL.NS', 'VINEETLAB.NS', 'VINYLINDIA.NS', 'VIPCLOTHNG.NS', 'VIPIND.NS', 'VIPULLTD.NS', 'VISAKAIND.NS', 'VISASTEEL.NS', 'VISESHINFO.NS', 'VISHAL.NS', 'VISHNU.NS', 'VISHWARAJ.NS', 'VIVIDHA.NS', 'VIVIMEDLAB.NS', 'VLIFEPP.NS', 'VLSFINANCE.NS', 'VMART.NS', 'VOLTAMP.NS', 'VOLTAS.NS', 'VRLLOG.NS', 'VSSL.NS', 'VSTIND.NS', 'VSTTILLERS.NS', 'VTL.NS', 'WABAG.NS', 'WABCOINDIA.NS', 'WALCHANNAG.NS', 'WANBURY.NS', 'WATERBASE.NS', 'WEALTH.NS', 'WEBELSOLAR.NS', 
'WEIZMANIND.NS', 'WELCORP.NS', 'WELENT.NS', 'WELINV.NS', 'WELSPUNIND.NS', 'WENDT.NS', 'WESTLIFE.NS', 'WHEELS.NS', 'WHIRLPOOL.NS', 'WILLAMAGOR.NS', 'WINDLAS.NS', 'WINDMACHIN.NS', 'WINPRO.NS', 'WIPL.NS', 'WIPRO.NS', 'WOCKPHARMA.NS', 'WONDERLA.NS', 'WORTH.NS', 'WSI.NS', 'WSTCSTPAPR.NS', 'XCHANGING.NS', 'XELPMOC.NS', 'XPROINDIA.NS', 'YAARI.NS', 'YESBANK.NS', 'YUKEN.NS', 'ZEEL.NS', 'ZEELEARN.NS', 'ZEEMEDIA.NS', 'ZENITHEXPO.NS', 'ZENITHSTL.NS', 'ZENSARTECH.NS', 'ZENTEC.NS', 'ZODIAC.NS', 'ZODIACLOTH.NS', 'ZOMATO.NS', 'ZOTA.NS', 'ZUARI.NS', 'ZUARIGLOB.NS', 'ZYDUSWELL.NS']\n\n# print(\"len(all_stocks_nifty):\", len(all_stocks_nifty))\n\n\n# delisted = ['MAGMA.NS']\n# # Entries after MRO.NS need to be reassessed\n# # Last 5 entries do not have right data in yahoo finance\n# len(delisted)\n\n# all_stocks_nifty = [i for i in all_stocks_nifty if i not in delisted]\n# len(all_stocks_nifty)\n\nall_stock_codes = nse.get_stock_codes()\n#print([x + \".NS\" for x in (all_stock_codes.keys())][1:])\n\ntickers=['^NSEI'] + [x + \".NS\" for x in (all_stock_codes.keys())][1:] #nifty500\n#Remove duplicates if any\n#tickers = list(dict.fromkeys(tickers))\nprint(tickers)\ndata_directory = '../data/'\nbad_tickers =[]\n\ndef clean_dataset(df):\n assert isinstance(df, pd.DataFrame), \"df needs to be a pd.DataFrame\"\n df.dropna(inplace=True)\n indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)\n df_ret = df[indices_to_keep].astype(np.float64).round(5)\n #print(\"Changed from {} to {}\".format(df.shape, df_ret.shape))\n return df_ret\n\ndef dateparse (time_in_secs): \n return datetime.datetime.strptime(time_in_secs, \"%Y-%m-%d\")\n\n\n# %%\n################## Takes ~120m to execute [download from yahoo] or 40 secs to read from already downloaded files of all NIFTY stocks [~1800] ######################\n\n\ndef download_and_save_stock_info(stock, data_directory):\n i = stock\n print(\"Download data for:\", i)\n stock_details = yf.Ticker(i)\n s = stock_details.get_info()\n\n sd = pd.DataFrame.from_dict(data=s, orient='index', columns=[i])\n if ((sd) is not None):\n sd.to_csv(data_directory + i + '_info.csv', header=False)\n else:\n print(\"No sd (stock_details) for:\", stock)\n\n if ((stock_details.get_financials()) is not None):\n if (len(stock_details.get_financials().loc['Total Revenue']) > 0):\n stock_details.get_financials().to_csv(data_directory + i + '_financials.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No real stock_details.get_financials() for:\", stock)\n else:\n print(\"No stock_details.get_financials() for:\", stock)\n if ((stock_details.quarterly_financials) is not None):\n (stock_details.quarterly_financials).to_csv(data_directory + i + '_quarterly_financials.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.quarterly_financials for:\", stock)\n if ((stock_details.quarterly_balancesheet) is not None):\n (stock_details.quarterly_balancesheet).to_csv(data_directory + i + '_quarterly_balancesheet.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.quarterly_balancesheet for:\", stock)\n if ((stock_details.balance_sheet) is not None):\n (stock_details.balance_sheet).to_csv(data_directory + i + '_balance_sheet.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n 
else:\n print(\"No stock_details.balance_sheet for:\", stock)\n if ((stock_details.actions) is not None):\n (stock_details.actions).to_csv(data_directory + i + '_actions_dividends_stock_splits.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.actions for:\", stock)\n if ((type(stock_details.institutional_holders) is not None) and (stock_details.institutional_holders is not None)):\n (stock_details.institutional_holders).to_csv(data_directory + i + '_institutional_holders.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.institutional_holders for:\", stock)\n if ((stock_details.major_holders) is not None):\n (stock_details.major_holders).to_csv(data_directory + i + '_major_holders.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.major_holders for:\", stock)\n if ((stock_details.quarterly_earnings) is not None):\n (stock_details.quarterly_earnings).to_csv(data_directory + i + '_quarterly_earnings.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.quarterly_earnings for:\", stock)\n if ((stock_details.quarterly_cashflow) is not None):\n (stock_details.quarterly_cashflow).to_csv(data_directory + i + '_quarterly_cashflow.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n else:\n print(\"No stock_details.quarterly_cashflow for:\", stock)\n #if (type(stock_details.quarterly_financials) != None):\n #stock_details.stats()\n\n #print((stock_details.get_financials()).loc['Total Revenue']) #Will raise an exception in get_ticker_info() and push to bad_tickers list if not available\n\n return sd\n\n\ndef get_ticker_info(tickers):\n stock_details = pd.DataFrame()\n df_all_stock_details = pd.DataFrame()\n bad_tickers =[]\n\n for i in tickers:\n\n try:\n diff = dt.timedelta(0)\n if (os.path.exists(data_directory + i + '_info.csv')):\n mtime = os.path.getmtime(data_directory + i + '_info.csv')\n #print(\"Last modification time since the epoch:\", mtime)\n \n last_modified_date = date.fromtimestamp(mtime)\n today = dt.datetime.today().date()\n\n # calculate no. of days since the file was last downloaded\n diff = today - last_modified_date\n #print(\"Last modification date for {} is {} ({} Days)\".format(i, last_modified_date, diff.days))\n\n # if downloaded more than 30 day before, download fresh data\n if(diff.days > 20):\n stock_details = download_and_save_stock_info(i, data_directory)\n elif (os.path.exists(data_directory + i + '_info.csv')):\n #print('Reading from local file. 
Path:' + data_directory + i + '_info.csv')\n stock_details = pd.read_csv(data_directory + i + '_info.csv', sep = ',', index_col=0, header=None ) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n \n sd = stock_details\n sd.columns = [i]\n \n\n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred for:\", i, traceback.format_exc())\n bad_tickers.append(i)\n continue\n\n df_all_stock_details = pd.concat([df_all_stock_details, sd],axis =1)\n \n\n pd.set_option('display.max_columns', None)\n df_all_stock_details.T.to_csv(data_directory + 'AllStockDetails.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n return(df_all_stock_details.T, bad_tickers)\n\n\n# List of stocks that did not have financial data\n#tickers = ['3PLAND.NS', '63MOONS.NS', 'AGRITECH.NS', 'BEARD-RE.NS', 'CLSEL.NS', 'DCMSRIND.NS', 'FOODSIN.NS', 'ICDSLTD.NS', 'INTLCONV.NS', 'JPOLYINVST.NS', 'JTLINFRA.NS', 'KALYANI.NS', 'KHAICHEM.NS', 'NDGL.NS', 'NIRAJISPAT.NS', 'PASUPTAC.NS', 'S&SPOWER.NS', 'SHIVALIK.NS', 'SHIVAM-RE.NS', 'SHYAMCENT.NS', 'STYLAMIND.NS', 'SURANASOL.NS', 'SVPGLOB.NS', 'THANGAMAYL.NS','VLIFEPP.NS']\n\ndf_all_stock_details, bad_tickers = get_ticker_info(tickers)\ndf_all_stock_details.head()\nbad_tickers\n\n\n# %%\ndef epochToDateTime(epochtime):\n date_time = datetime.datetime.fromtimestamp(epochtime)\n print(datetime)\n\ndef download_stock_price(stock, start, end):\n ret_val = -1\n print(stock, start, end)\n if start < end:\n df_single_stock = pd.DataFrame()\n #print(\"Downloading stock data for {} from internet for the period {} to {}\".format(stock, start, end))\n try:\n df_single_stock = yf.download(stock,start,end)\n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n #finally:\n # - ^NSEI: Data doesn't exist for startDate = 1009238400, endDate = 1189987200\n print(\"exc_type:\", exc_type)\n print(\"exc_value:\", exc_value)\n print(\"exc_tb:\", exc_tb)\n \n if (len(df_single_stock.index) > 0):\n ret_val = 0\n else:\n print(\"Failed download: No data found for this date range, symbol {}\".format(stock))\n elif start >= end:\n print(\"Failed download: start should be before end, symbol {}\".format(stock))\n\n return ret_val, df_single_stock\n\n\ndef get_ticker_adj_close_price_and_percent_change(start, end, tickers, data_directory):\n\n DF_percentage_change = pd.DataFrame()\n df_single_stock = pd.DataFrame()\n df_percentage_change = pd.DataFrame()\n DF_all_stock_price = pd.DataFrame()\n bad_tickers =[]\n\n dateparse = lambda x: datetime.datetime.strptime(x, '%Y-%m-%d')\n\n for i in tickers:\n #print(i)\n try:\n if (os.path.exists(data_directory + i +'.csv')):\n #print('Reading from local file. 
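# %%
# A minimal, self-contained sketch of the cache-staleness check used in
# get_ticker_info() above: re-download only when the cached info CSV is older
# than a cutoff (20 days in this notebook). The helper name, ticker and path
# below are illustrative assumptions, not part of the original code.
import os
import datetime as dt

def is_cache_stale(path, max_age_days=20):
    # True when the file is missing or last modified more than max_age_days ago.
    if not os.path.exists(path):
        return True
    last_modified = dt.date.fromtimestamp(os.path.getmtime(path))
    return (dt.date.today() - last_modified).days > max_age_days

# Hypothetical usage mirroring the flow above:
# if is_cache_stale(data_directory + 'RELIANCE.NS_info.csv'):
#     download_and_save_stock_info('RELIANCE.NS', data_directory)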
Path:' + data_directory + i + '.csv for:' + i)\n df_single_stock = pd.read_csv(data_directory + i + '.csv', delimiter=',', parse_dates=['Date'], date_parser=dateparse, index_col='Date', header=0)\n #df_single_stock = pd.read_csv(data_directory + i + '.csv', delimiter=',', index_col='Date', header=0)\n df_single_stock = df_single_stock.sort_values(by='Date')\n\n str_dt = np.datetime64(df_single_stock.head(1).index.values[0], 'D')\n start_dt = start\n start_dt = np.datetime64(start_dt, 'D')\n str_DD = np.timedelta64(start_dt - str_dt, 'D')\n #print(str_dt, start_dt, str_DD) \n\n en_dt = np.datetime64(df_single_stock.tail(1).index.values[0], 'D')\n end_dt = end\n end_dt = np.datetime64(end_dt, 'D')\n en_DD = np.timedelta64(end_dt - en_dt, 'D')\n #print(i, str_dt, start_dt, str_DD, en_dt, end_dt, en_DD)\n\n # if a file exists for the stock with a latter date, that generally indicates that the stock was \n # listed on the start date available on the file. So only needs to get data from en_dt to end_dt\n \n #if (str_DD < 0 and en_DD > 0) or (en_DD > 0):\n if (en_DD > 1): # en_DD = 1 day then you have data till yesterday (latest) unless you run this after close of stock market 3:30 pm IST\n\n #ret_val_str, df_single_stock_str = download_stock_price(i, str(start_dt), str(str_dt))\n ret_val_en, df_single_stock_en = download_stock_price(i, str(en_dt+1), str(end_dt))\n #if (ret_val_str == -1 and ret_val_en == -1):\n if (ret_val_en == -1):\n continue\n # Remove header\n #df_single_stock = pd.concat([df_single_stock_str, df_single_stock, df_single_stock_en])\n df_single_stock = pd.concat([df_single_stock, df_single_stock_en])\n\n\n # elif (str_DD < 0):\n # # Download delta\n # ret_val_str, df_single_stock_str = download_stock_price(i, str(start_dt), str(str_dt))\n\n # if (ret_val_str == -1):\n # continue\n # # Remove header\n # df_single_stock = pd.concat([df_single_stock_str,df_single_stock])\n\n\n elif(en_DD > 1):\n # Download delta\n ret_val_en, df_single_stock_en = download_stock_price(i, str(en_dt+1), str(end_dt))\n if (ret_val_en == -1):\n continue\n\n df_single_stock = pd.concat([df_single_stock,df_single_stock_en])\n\n # Just need to use the data read from the file.\n #elif (en_DD == 0): #and str_DD == 0):\n # continue\n\n else:\n ret_val, df_single_stock = download_stock_price(i, start, end)\n if (ret_val == -1):\n tickers.remove(i)\n bad_tickers.append(i)\n continue\n\n #print(\"Write the file for:\", i)\n # Remove rows with duplicate indices:\n #df_single_stock.set_index('Date')\n df_single_stock = df_single_stock.loc[~df_single_stock.index.duplicated(keep='first')]\n df_single_stock.to_csv(data_directory + i + '.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n # USE dropna() on the raw price data before calculating returns using pct_change\n # Use Adjusted prices only\n df_all_stock_price = df_single_stock[['Adj Close']]#.dropna()\n df_all_stock_price.columns = [i]\n\n df_percentage_change = df_all_stock_price.pct_change()\n df_percentage_change.columns = [i]\n\n # pd.concat requires that the indices be unique. 
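# %%
# Hedged sketch of the incremental price update applied above: keep the cached
# history, fetch only the rows after the last cached date, then concatenate and
# drop the overlapping index entries. Synthetic frames stand in for the
# yf.download() output; dates and prices are made up.
import pandas as pd

cached = pd.DataFrame({'Adj Close': [100.0, 101.5, 102.0]},
                      index=pd.to_datetime(['2024-01-01', '2024-01-02', '2024-01-03']))
fresh = pd.DataFrame({'Adj Close': [102.0, 103.2]},
                     index=pd.to_datetime(['2024-01-03', '2024-01-04']))  # one day overlaps

merged = pd.concat([cached, fresh])
merged = merged.loc[~merged.index.duplicated(keep='first')]  # de-duplicate the overlap
print(merged)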
To remove rows with duplicate indices, use:\n df_percentage_change = df_percentage_change.loc[~df_percentage_change.index.duplicated(keep='first')]\n df_all_stock_price = df_all_stock_price.loc[~df_all_stock_price.index.duplicated(keep='first')]\n \n DF_percentage_change = pd.concat([DF_percentage_change, df_percentage_change.round(5)], axis =1)\n DF_all_stock_price = pd.concat([DF_all_stock_price, df_all_stock_price.round(5)], axis =1)\n except ValueError:\n print(\"Oops ValueError!\", sys.exc_info(), \"occurred.\")\n bad_tickers.append(i)\n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", i, traceback.format_exc())\n bad_tickers.append(i)\n \n return(DF_percentage_change, DF_all_stock_price, bad_tickers)\n\n\n#df_adj_close_percent_change, df_all_stock_price, bad_tickers = get_ticker_adj_close_price_and_percent_change(start, end, tickers)\n\n\n# %%\n\ndf_adj_close_percent_change, df_all_stock_price, bad_tickers = get_ticker_adj_close_price_and_percent_change(start, end, tickers, data_directory)\ndf_adj_close_percent_change.head()\n\ndf_all_stock_price.head()\nprint(len(df_adj_close_percent_change.index), len(df_all_stock_price.index))\nif (len(df_adj_close_percent_change.index) > 0 and len(df_all_stock_price.index) > 0):\n df_adj_close_percent_change.to_csv(data_directory + 'AdjCloseStockPercentChange.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n df_all_stock_price.to_csv(data_directory + 'AllStockPrice.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\nelse:\n print(\"No entries to write!\")\n\n\n# %%\nbad_tickers\nlen(bad_tickers)\n\n\ngood_tickets = [x for x in tickers if x not in bad_tickers]\nlen(good_tickets)\n\n\n# %%\ndf_adj_close_percent_change.tail(2)\ndf_all_stock_price.tail(2)\n\n\n# %%\nbad_tickers\nlen(bad_tickers)\n\n\n# %%\ntickers\nlen(tickers)\n\ntickers = [i for i in tickers if i not in bad_tickers]\nlen(tickers)\n\n\n# %%\nna__values = [\"\",\n \"#N/A\",\n \"#N/A N/A\",\n \"#NA\",\n \"-1.#IND\",\n \"-1.#QNAN\",\n \"-NaN\",\n \"-nan\",\n \"1.#IND\",\n \"1.#QNAN\",\n \"<NA>\",\n \"N/A\",\n \"NULL\",\n \"NaN\",\n \"n/a\", \n \"nan\", \n \"null\"]\n \ndef get_adj_close_price_percentage_change_of_all_stocks():\n df_adj_close_percent_change = pd.DataFrame()\n if (os.path.exists(data_directory + 'AdjCloseStockPercentChange.csv')):\n print('Reading from local file. Path:' + data_directory + 'AdjCloseStockPercentChange.csv')\n df_adj_close_percent_change = pd.read_csv(data_directory + 'AdjCloseStockPercentChange.csv', delimiter=',', na_values=na__values, keep_default_na=False, parse_dates=True, date_parser=dateparse, index_col='Date' )\n df_adj_close_percent_change = df_adj_close_percent_change.sort_values(by='Date')\n df_adj_close_percent_change.shape\n else:\n print(\"File does not exist!!\")\n return df_adj_close_percent_change\n\ndef get_all_stock_price():\n df_all_stock_price = pd.DataFrame()\n if (os.path.exists(data_directory + 'AllStockPrice.csv')):\n print('Reading from local file. 
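# %%
# Minimal illustration of the daily-return step above: pct_change() on the
# adjusted close gives (P_t - P_{t-1}) / P_{t-1}. Prices are made up.
import pandas as pd

prices = pd.Series([100.0, 102.0, 99.96, 101.0], name='ADJCLOSE')
daily_returns = prices.pct_change()
print(daily_returns.round(5))  # first entry is NaN: no prior price to compare against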
Path:' + data_directory + 'AllStockPrice.csv')\n df_all_stock_price = pd.read_csv(data_directory + 'AllStockPrice.csv', delimiter=',', na_values=na__values, keep_default_na=False, parse_dates=True, date_parser=dateparse, index_col='Date' )\n df_all_stock_price = df_all_stock_price.sort_values(by='Date')\n df_all_stock_price.shape\n else:\n print(\"File does not exist!!\")\n return df_all_stock_price\n\n\ndf_adj_close_percent_change = get_adj_close_price_percentage_change_of_all_stocks()\ndf_all_stock_price = get_all_stock_price()\ndf_adj_close_percent_change.head()\ndf_all_stock_price.head()\n\n# %% [markdown]\n# The below caculation is lower, since it should be modified to consider the dividend returns.\n\n# %%\n#################################################################################################\n# CALCULATE Returns for different time periods #\n#################################################################################################\ndf_all_stock_price.tail(2)\ndf_all_stock_returns = pd.DataFrame()\n\n\ndef valid_dataframe(df):\n first_idx = df.first_valid_index()\n last_idx = df.last_valid_index()\n #print(first_idx, last_idx)\n valid_df = df.loc[first_idx:last_idx]\n return valid_df\n\n\ndef evaluate_all_stock_returns(days):\n res = pd.DataFrame()\n try:\n\n valid_df_all_stock_price = valid_dataframe(df_all_stock_price)\n\n en_dt = np.datetime64(valid_df_all_stock_price.tail(1).index.values[0], 'D')\n i = 0\n while (len(valid_df_all_stock_price.columns) - valid_df_all_stock_price.loc[en_dt].isnull().sum() <= 1) and (i < 31):\n i = i + 1\n print(\"Changed from {} to {}\".format(en_dt, np.datetime64(valid_df_all_stock_price.tail(1 + i).index.values[0])))\n en_dt = np.datetime64(valid_df_all_stock_price.tail(1 + i).index.values[0], 'D')\n\n str_dt = np.datetime64(valid_df_all_stock_price.tail(1 + days).index.values[0], 'D')\n i = 0\n while (len(valid_df_all_stock_price.columns) - valid_df_all_stock_price.loc[str_dt].isnull().sum() <= 1) and (i < 31):\n i = i + 1\n print(\"Changed from {} to {}\".format(str_dt, np.datetime64(valid_df_all_stock_price.tail(1 + days + i).index.values[0])))\n str_dt = np.datetime64(valid_df_all_stock_price.tail(1 + days + i).index.values[0], 'D')\n\n res = (valid_df_all_stock_price.loc[en_dt].subtract(valid_df_all_stock_price.loc[str_dt])).truediv(valid_df_all_stock_price.loc[str_dt]).multiply(100)\n\n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n return res\n\n\n\n#1 DAY\ndays = 1\nrow_name = '1 DAY'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#1 WEEK\ndays = 7\nrow_name = '1 WEEK'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#1 MONTH\ndays = 30\nrow_name = '1 MONTH'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#6 MONTHS\ndays = 30 * 6\nrow_name = '6 MONTHS'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#1 YEAR\ndays = 365\nrow_name = '1 YEAR'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#2 YEARS\ndays = 365*2\nrow_name = '2 YEAR'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#3 YEARS\ndays = 365*3\nrow_name = '3 YEAR'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#5 YEARS\ndays = 365*5\nrow_name = '5 YEAR'\ndf_all_stock_returns[row_name] = evaluate_all_stock_returns(days)\n\n#MAX 
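# %%
# The window returns computed by evaluate_all_stock_returns() reduce to a simple
# percentage change between two price snapshots. A compact sketch with made-up
# prices (tickers are placeholders, not real quotes):
import pandas as pd

price_start = pd.Series({'RELIANCE.NS': 2400.0, 'TCS.NS': 3300.0})  # close ~N days ago
price_end = pd.Series({'RELIANCE.NS': 2520.0, 'TCS.NS': 3201.0})    # latest close

window_return_pct = (price_end - price_start) / price_start * 100
print(window_return_pct.round(2))  # RELIANCE.NS 5.00, TCS.NS -3.00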
YEARS\nrow_name = 'MAX'\nday_one_value_of_all_stocks = df_all_stock_price.T.apply(lambda x: np.nan if all(x.isnull()) else x[x.first_valid_index()], axis=1)#.tolist()\nen_dt = np.datetime64(df_all_stock_price.tail(1).index.values[0], 'D')\ni = 0\nwhile (len(df_all_stock_price.columns) - df_all_stock_price.loc[en_dt].isnull().sum() <= 1) and (i < 31):\n i = i + 1\n print(\"Changed from {} to {}\".format(en_dt, np.datetime64(df_all_stock_price.tail(1 + i).index.values[0])))\n en_dt = np.datetime64(df_all_stock_price.tail(1 + i).index.values[0], 'D')\ndf_all_stock_returns[row_name] = (df_all_stock_price.loc[en_dt].subtract(day_one_value_of_all_stocks)).truediv(day_one_value_of_all_stocks).multiply(100)\n\n\ndf_all_stock_returns.to_csv(data_directory + 'AllStockReturns.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\ndf_all_stock_returns.head()\n\n\n# %%\n#%%capture\n#################################################################################################\n# Standard Dev Calculation #\n#################################################################################################\npd.set_option('display.max_columns', None)\n\ndef calc_all_stocks_standard_deviation(df_adj_close_percent_change):\n #print(df_adj_close_percent_change.columns)\n #print(len(df_adj_close_percent_change.columns))\n np_RiskAdjustedReturn_parameters = np.zeros((1,))\n\n np_standard_deviation = np.zeros(1,)\n np_standard_deviation_days_calc = np.zeros(1,)\n\n for c, percentchangelst in df_adj_close_percent_change.items():\n non_nan_column_values = percentchangelst.values[~np.isnan(percentchangelst.values)]\n #print(c, \"Reduced from percentchangelst {} to non_nan_column_values:{}\".format(len(percentchangelst), len(non_nan_column_values)))\n np_standard_deviation = np.append(np_standard_deviation, non_nan_column_values.std(ddof=1))\n np_standard_deviation_days_calc = np.append(np_standard_deviation_days_calc, len(non_nan_column_values))\n\n #return np_standard_deviation\n np_standard_deviation = np_standard_deviation[1:].reshape(1, len(df_adj_close_percent_change.columns)) #len(tickers))\n np_standard_deviation_days_calc = np_standard_deviation_days_calc[1:].reshape(1, len(df_adj_close_percent_change.columns)) #len(tickers))\n #stock_StandardDeviation = pd.DataFrame(data = np_standard_deviation, columns = tickers, index=[\"Standard Deviation\"])\n stock_StandardDeviation = pd.DataFrame(data = np_standard_deviation, columns = df_adj_close_percent_change.columns, index=[\"Standard Deviation\"])\n return np_standard_deviation, np_standard_deviation_days_calc, stock_StandardDeviation\n\nnp_standard_deviation, np_standard_deviation_days_calc, stock_StandardDeviation = calc_all_stocks_standard_deviation(df_adj_close_percent_change)\n\n\n# %%\n#################################################################################################\n# Max Drawdown, Average Drawdown #\n#################################################################################################\n# Around 4 mins execution time\n\nnum_of_rows = len(df_adj_close_percent_change.index)\nindex_list = list(df_adj_close_percent_change.index)\nheader_list = list(df_adj_close_percent_change.head())\n\nprint(\"rows:{} .Change tickers from len(tickers):{} columns:{}\".format(num_of_rows, len(tickers), len(header_list)))\ntickers = header_list # May be a reduced in number here\n\nnp_NSE = df_adj_close_percent_change[['^NSEI']].to_numpy().reshape(num_of_rows, 1)\n# Create a matrix with 
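# %%
# Sketch of the per-stock volatility computed by calc_all_stocks_standard_deviation():
# sample standard deviation (ddof=1) of the daily percentage changes, ignoring NaNs.
# The tiny frame below is made up; pandas' column-wise std gives the same result
# as the explicit column loop above.
import numpy as np
import pandas as pd

pct_change = pd.DataFrame({
    'RELIANCE.NS': [np.nan, 0.010, -0.004, 0.007],
    'TCS.NS':      [np.nan, -0.002, 0.005, np.nan],
})
std_per_stock = pct_change.std(ddof=1, skipna=True)
days_used = pct_change.count()  # number of non-NaN observations per stock
print(std_per_stock.round(5))
print(days_used)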
'total_no_of_scripts_including_NSE' duplicate columns of NSE (total_no_days x total_no_of_scripts_including_NSE )\nnp_NSE = np.tile(np_NSE, (1, len(header_list)))\n\nnp_stock = df_adj_close_percent_change.to_numpy()\n\ndef calc_stock_outperform(np_NSE, np_stock,tickers):\n\tnp_stock_Outperform = np.subtract(np_stock, np_NSE).round(5)\n\tstock_Outperform = pd.DataFrame(data = np_stock_Outperform, index = index_list, columns = tickers)\n\tstock_Outperform.to_csv(data_directory + 'stock_Outperform.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\treturn stock_Outperform\n\n#def calc_stock_excess_return(np_stock):\n\t#np_stock_ExcessReturn = np.subtract(np_stock, monthly_risk_free_interest_rate)\n\t#stock_ExcessReturn = pd.DataFrame(data = np_stock_ExcessReturn, index = index_list, columns = tickers)\n\t#stock_ExcessReturn.to_csv(data_directory + 'stock_ExcessReturn.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\t#return np_stock_ExcessReturn\n\ndef calc_stock_abnormal(np_stock, index_list, tickers):\n\tnp_stock_ExcessReturn = np.subtract(np_stock, monthly_risk_free_interest_rate)\n\tfor scrip, std in stock_StandardDeviation.items(): #calc_all_stock_standard_deviation(df_adj_close_percent_change).items():\n\t\tstandardDV = std.values[0]\n\t\tnp_benchmark_with_std = np.multiply(np_NSE, standardDV)\n\t\t# Abnormal = ExcessReturn_Stock - StandardDeviation(Return) * ExcessReturn_Nifty50\n\t\tnp_stock_Abnormal = np.subtract(np_stock_ExcessReturn, np_benchmark_with_std)\n\tstock_Abnormal = pd.DataFrame(data = np_stock_Abnormal, index = index_list, columns = tickers)\n\tstock_Abnormal.to_csv(data_directory + 'stock_Abnormal.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\treturn stock_Abnormal\n\ndef calc_stock_downside(np_stock, index_list, tickers):\n\tnp_zero = np.zeros( (num_of_rows, len(tickers)), dtype=int )\n\tnp_stock_Downside = np.minimum(np_stock, np_zero)\n\tstock_Downside = pd.DataFrame(data = np_stock_Downside, index = index_list, columns = tickers)\n\tstock_Downside.to_csv(data_directory + 'stock_Downside.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\treturn stock_Downside\n\n\nstock_Outperform = calc_stock_outperform(np_NSE, np_stock, tickers)\nstock_Abnormal = calc_stock_abnormal(np_stock, index_list, tickers)\nstock_Downside = calc_stock_downside(np_stock, index_list, tickers)\n\ndef CAGR(first, last, periods_years):\n return (last/first)**(1/periods_years)-1\n\n# CumulativeReturn = CumulativeReturn * (1 + df_adj_close_percent_change[c][r])\nnp_ones = np.ones( (num_of_rows, len(tickers)) )\nnp_stock_onepluseDailyReturn = np.add(np_stock, np_ones).round(5)\n\nnp_stock_CumulativeReturn = np.zeros((1,))\nnp_stock_AnnualisedReturn = np.zeros((4,1))\nfaulty_holding_period_stock=[]\n\n\nfor col in range(np_stock_onepluseDailyReturn.shape[1]):\n\tc = np_stock_onepluseDailyReturn[:, col] #array([ nan, 0.9765 , 0.98443, 0.97902, 1.00756, 0.99222, 0.99645, 0.98004, 0.99172, 0.98506, ..., 0.9804 , 1.00498, 0.99496, 1.00696, 0.97093, 1.00162, 0.99585, 1.01082, 1.01367, nan])\n\tnon_nan_column_values = c[~np.isnan(c)]\n\tnon_nan_indices_list = np.argwhere(~np.isnan(c))\n\t\n\tif (non_nan_column_values.size == 0): #All values in the cloumn are NaN\n\t\tcolumnar_product = [0.0]\n\t\tcolumnar_product1 = 0.0\n\t\tstartd = dt.datetime(1970, 
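# %%
# Worked example for the CAGR() helper defined above: growing from 100 to 200
# over 5 years compounds at roughly 14.87% per year.
first, last, periods_years = 100.0, 200.0, 5.0
cagr = (last / first) ** (1 / periods_years) - 1
print(round(cagr, 4))  # 0.1487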
1, 1)\n\t\tendd = dt.datetime(1970, 1, 1)\n\t\tno_of_trading_days = 0.0\n\telse:\n\t\tcolumnar_product = np.prod(non_nan_column_values).reshape(1,)\n\t\tcolumnar_product1 = columnar_product.cumprod()[-1].round(5) #last entry of the column\n\t\t#non_nan_indices_list\n\t\tif (len(non_nan_indices_list) > 0):\n\t\t\tstartd = index_list[non_nan_indices_list[0][0]]\n\t\t\tendd = index_list[non_nan_indices_list[-1][0]]\n\t\t\tno_of_trading_days = len(non_nan_indices_list)\n\n\tnp_stock_CumulativeReturn = np.concatenate( (np_stock_CumulativeReturn, columnar_product) )\n\tnon_nan_indices_list = np.argwhere(~np.isnan(c))\n\tif (len(non_nan_indices_list) > 0):\n\t\tstartd = index_list[non_nan_indices_list[0][0]]\n\t\tendd = index_list[non_nan_indices_list[-1][0]]\n\t\tno_of_trading_days = len(non_nan_indices_list)\n\t\n\tno_of_trading_years = no_of_trading_days/252\n\t# no_of_trading_days gives no. of trading days in our time horizon\n\t# # 252 is no. of trading days in a year\n\tif (no_of_trading_years != 0):\n\t\tAnnualisedReturn = (columnar_product1)**(1/no_of_trading_years) - 1 # Computed CAGR is for daily returns, if you use weekly/intraday data, change no_of_trading_years\n\t\tAnnualisedReturn = AnnualisedReturn.round(5)\n\telse:\n\t\tAnnualisedReturn = 0.0\n\n\tif (non_nan_column_values.size == 0): #All values in the cloumn are NaN\n\t\tprint(\"Scrip:{} startd:{} endd:{} no_of_trading_years:{} AnnualisedReturn:{}\".format(tickers[col] ,startd, endd, no_of_trading_years, AnnualisedReturn))\n\tnp_stock_AnnualisedReturn = np.hstack( (np_stock_AnnualisedReturn, np.array( [[startd], [endd], [no_of_trading_years], [AnnualisedReturn]] ) ))\n\n\nnp_stock_CumulativeReturn = np_stock_CumulativeReturn[1:].reshape(1,len(tickers))\n#print(\"np_stock_CumulativeReturn:\", np_stock_CumulativeReturn)\n\nnp_stock_AnnualisedReturn = np_stock_AnnualisedReturn[:,1:]\n#print(\"stock_AnnualisedReturn:\", np_stock_AnnualisedReturn)\n\nstock_CumulativeReturn = pd.DataFrame(data = np_stock_CumulativeReturn, index=[\"Cumulative Return\"], columns = tickers)\nstock_CumulativeReturn.to_csv(data_directory + 'stock_CumulativeReturn2.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\nstock_AnnualisedReturn = pd.DataFrame(data = np_stock_AnnualisedReturn, index =[\"Start Date\", \"End Date\", \"Holding Years\", \"Annualised Return\"], columns = tickers)\nstock_AnnualisedReturn.to_csv(data_directory + 'stock_AnnualisedReturn.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n\ncleaned_np_stock = np.nan_to_num(np_stock, copy=True, nan=0.0, posinf=None, neginf=None)\nstock_CleanedStocks_Drawdown = pd.DataFrame(data = cleaned_np_stock, index = index_list, columns = tickers)\nstock_CleanedStocks_Drawdown.to_csv(data_directory + 'stock_CleanedStocks_Drawdown.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\nnp_drawdown = np.zeros((1,len(tickers)))\nnp_drawdown_row = np.zeros((1,len(tickers)))\n\nnum_of_rows = len(stock_CleanedStocks_Drawdown.index)\nindex_list = list(stock_CleanedStocks_Drawdown.index)\n\nfor r in cleaned_np_stock:\n\t#print(\"r:\", r, len(r))\n\ttry:\n\t\tdf_adj_close_percent_change_plus_one = np.add(r, 1.0)\n\texcept:\n\t\tprint(\"Oops!\", sys.exc_info(), \"occurred.\")\n\t\tprint(\"Problem with np_stock row:\", r)\n\t\tcontinue\n\t# Drawdown = Min((1+ Previous_Drawdown )*(1+ Return) -1, 0)\n\tnp_drawdown_row = np.minimum( 
(np.multiply( np.add(np_drawdown_row, 1), df_adj_close_percent_change_plus_one) -1), 0)\n\tnp_drawdown = np.vstack((np_drawdown, np_drawdown_row))\nnp_drawdown = np_drawdown[1:]\n\nstock_Drawdown = pd.DataFrame(data = np_drawdown, columns = tickers, index=index_list)\nstock_Drawdown.to_csv(data_directory + 'stock_Drawdown.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\nnp_max_drawdown = np.zeros((1,))\nnp_average_drawdown = np.zeros((1,))\n\nfor c in stock_Drawdown:\n max_drawdown = -1 * min(stock_Drawdown[c].dropna())\n average_drawdown = np.average(stock_Drawdown[c].dropna())\n np_max_drawdown = np.hstack((np_max_drawdown, max_drawdown))\n np_average_drawdown = np.hstack((np_average_drawdown, average_drawdown))\n\nstock_MaxAverageDrawdown = pd.concat([pd.DataFrame(data = np_max_drawdown[1:].reshape(1, len(tickers)), index=[\"Max Drawdown\"], columns = tickers), pd.DataFrame(data = np_average_drawdown[1:].reshape(1, len(tickers)), index=[\"Average Drawdown\"], columns = tickers)])\nstock_MaxAverageDrawdown.to_csv(data_directory + 'stock_AnnualisedReturn.csv', mode='a', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\nnp_max_drawdown = np_max_drawdown[1:].reshape(1, len(tickers))\nnp_average_drawdown = np_average_drawdown[1:].reshape(1, len(tickers))\n\n#np_RiskAdjustedReturn_parameters = np.hstack( (np_standard_deviation, np_standard_deviation_days_calc, np_stock_AnnualisedReturn, np_max_drawdown, np_average_drawdown))\nnp_RiskAdjustedReturn_parameters = np.vstack((np_standard_deviation, np_standard_deviation_days_calc, np_max_drawdown, np_average_drawdown))\n#np_stock_AnnualisedReturn.shape\n#np_RiskAdjustedReturn_parameters.shape\nnp_RiskAdjustedReturn_parameters = np_RiskAdjustedReturn_parameters.reshape(4, len(tickers))\n#np_stock_AnnualisedReturn[0]\n#np_RiskAdjustedReturn_parameters[0]\nnp_RiskAdjustedReturn_parameters = np.vstack( (np_stock_AnnualisedReturn, np_RiskAdjustedReturn_parameters))\n#np_RiskAdjustedReturn_parameters.shape\nnp_RiskAdjustedReturn_parameters = np_RiskAdjustedReturn_parameters.reshape(8, len(tickers))\nprint(\"*****************************\")\n#np_RiskAdjustedReturn_parameters[0]\nstock_RiskAdjustedReturn_parameters = pd.DataFrame(data = np_RiskAdjustedReturn_parameters, index =[\"Start Date\", \"End Date\", \"Holding Period Years\", \"Annualised return\", \"Standard Deviation\",\"Standard Deviation Days Considered\", \"Max drawdown\", \"Average drawdown\"], columns = tickers)\nstock_RiskAdjustedReturn_parameters.to_csv(data_directory + 'RiskAdjustedReturn_parameters.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n\n# %%\ndef remove_duplicate_columns(stock_RiskAdjustedReturn_parameters):\n \"\"\"\n Remove any duplicate column(s) in the dataframe\n \"\"\"\n stock_RiskAdjustedReturn_parameters.reset_index()\n stock_RiskAdjustedReturn_parameters\n stock_RiskAdjustedReturn_parameters_header = stock_RiskAdjustedReturn_parameters.columns\n #stock_RiskAdjustedReturn_parameters_header = stock_RiskAdjustedReturn_parameters_header.insert(0, index_column_name)\n #stock_RiskAdjustedReturn_parameters.drop_duplicates(subset=['timestamp', 'user_id'])\n stock_RiskAdjustedReturn_parameters = stock_RiskAdjustedReturn_parameters.T.reset_index().drop_duplicates().T\n stock_RiskAdjustedReturn_parameters\n stock_RiskAdjustedReturn_parameters = stock_RiskAdjustedReturn_parameters.iloc[1: , 
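# %%
# The row-by-row recursion above, Drawdown_t = min((1 + Drawdown_{t-1}) * (1 + r_t) - 1, 0),
# is equivalent to comparing cumulative wealth with its running peak. A small
# vectorised cross-check with made-up returns (not the notebook's data):
import pandas as pd

r = pd.Series([0.10, -0.20, 0.05, 0.12, -0.03])
wealth = (1 + r).cumprod()
drawdown = wealth / wealth.cummax() - 1                 # 0 at a new peak, negative otherwise
print(drawdown.round(5))
print("Max drawdown:", round(-drawdown.min(), 5))       # reported positive, as above
print("Average drawdown:", round(drawdown.mean(), 5))   # <= 0, matching stock_Drawdown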
:]\n stock_RiskAdjustedReturn_parameters.columns = stock_RiskAdjustedReturn_parameters_header\n #stock_RiskAdjustedReturn_parameters.set_index(index_column_name, inplace=True)\n return stock_RiskAdjustedReturn_parameters\n\nna__values = [\"\",\n \"#N/A\",\n \"#N/A N/A\",\n \"#NA\",\n \"-1.#IND\",\n \"-1.#QNAN\",\n \"-NaN\",\n \"-nan\",\n \"1.#IND\",\n \"1.#QNAN\",\n \"<NA>\",\n \"N/A\",\n \"NULL\",\n \"NaN\",\n \"n/a\", \n \"nan\", \n \"null\"]\n\nstock_RiskAdjustedReturn_parameters = pd.read_csv(data_directory + 'RiskAdjustedReturn_parameters.csv', delimiter=',', na_values=na__values, keep_default_na=False, parse_dates=True, encoding = 'utf-8', index_col=0, header=0 )\nstock_RiskAdjustedReturn_parameters = remove_duplicate_columns(stock_RiskAdjustedReturn_parameters)\nstock_RiskAdjustedReturn_parameters\n\nstock_Drawdown = pd.read_csv(data_directory + 'stock_Downside.csv', delimiter=',', na_values=na__values, keep_default_na=False, parse_dates=True, encoding = 'utf-8', index_col=0, header=0 )\nstock_Drawdown = remove_duplicate_columns(stock_Drawdown)\nstock_Drawdown.tail(2)\nstock_Drawdown.shape\n\n\n# %%\nnp_annualised_risk=np.zeros((1,))\nnp_sharpe_ratio=np.zeros((1,))\nnp_sortino_ratio=np.zeros((1,))\nnp_calmar_ratio=np.zeros((1,))\nnp_sterling_ratio=np.zeros((1,))\nfaulty_holding_period_stock=[]\n\nstock_RiskAdjustedReturn_parameters\n\nfor c in tickers:\n sharpe_ratio = 0.0\n #print(c, stock_Parameters[c][2], stock_Parameters[c][3], stock_Parameters[c][4])\n try:\n holding_period_years = float(stock_RiskAdjustedReturn_parameters[c][2])\n annualised_return = float(stock_RiskAdjustedReturn_parameters[c][3])\n standard_deviation = float(stock_RiskAdjustedReturn_parameters[c][4])\n standard_deviation_days_calc = stock_RiskAdjustedReturn_parameters[c][5]\n\n if(stock_RiskAdjustedReturn_parameters[c].values[6] == ''):\n max_drawdown = float(stock_RiskAdjustedReturn_parameters[c].values[7])\n else:\n max_drawdown = float(stock_RiskAdjustedReturn_parameters[c].values[6]) #float(stock_Parameters[c][5])\n average_drawdown = float(stock_RiskAdjustedReturn_parameters[c].values[7]) #float(stock_Parameters[c][6])\n except KeyError:\n stock_RiskAdjustedReturn_parameters[c][2]\n stock_RiskAdjustedReturn_parameters[c][3]\n stock_RiskAdjustedReturn_parameters[c][4]\n holding_period_years = (stock_RiskAdjustedReturn_parameters[c][2])\n annualised_return = (stock_RiskAdjustedReturn_parameters[c][3])\n standard_deviation = (stock_RiskAdjustedReturn_parameters[c][4])\n standard_deviation_days_calc = stock_RiskAdjustedReturn_parameters[c][5]\n\n if(stock_RiskAdjustedReturn_parameters[c].values[6] == ''):\n max_drawdown = (stock_RiskAdjustedReturn_parameters[c].values[7])\n else:\n max_drawdown = (stock_RiskAdjustedReturn_parameters[c].values[6]) #float(stock_Parameters[c][5])\n \n average_drawdown = (stock_RiskAdjustedReturn_parameters[c].values[7]) #float(stock_Parameters[c][6]) \n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n\n\n # There are 252 trading days in a given year. https://www.fool.com/knowledge-center/how-to-calculate-annualized-volatility.aspx\n # annualised_risk = standard_deviation * math.sqrt(252)\n if(holding_period_years >= 0):\n annualised_risk = standard_deviation * math.sqrt(holding_period_years) #12 Years????\n else:\n print(\"holding_period_years is -ve. 
Option to use cmath, but would provide complex number!\")\n faulty_holding_period_stock.append(c)\n #print(\"annualised_risk:\", annualised_risk)\n \n #np_annualised_risk = np.append(np_annualised_risk, np.array([annualised_risk]), axis=1)\n np_annualised_risk = np.hstack((np_annualised_risk, np.array([annualised_risk])))\n\n if (annualised_risk != 0):\n sharpe_ratio = (annualised_return - annual_risk_free_interest_rate)/annualised_risk\n else:\n sharpe_ratio = 0\n #print(\"sharpe_ratio:\", sharpe_ratio, \"<= annualised_return:\", annualised_return, \"annual_risk_free_interest_rate:\", annual_risk_free_interest_rate, \"annualised_risk:\", annualised_risk )\n #np_sharpe_ratio = np.append(np_sharpe_ratio, np.array([sharpe_ratio]), axis=1)\n np_sharpe_ratio = np.hstack((np_sharpe_ratio, np.array([sharpe_ratio])))\n\n sum_of_squares_of_list = sum(map(lambda i : i * i, stock_Downside[c].dropna()))\n #semideviation = math.sqrt(sum_of_squares_of_list/len(df.index)) * math.sqrt(252) # df.index???????????????????\n if (len(df_all_stock_price.index) > 0):\n semideviation = math.sqrt(sum_of_squares_of_list/len(df_all_stock_price.index)) * math.sqrt(252) # df.index???????????????????\n else:\n print(\"No data for the stock!!\")\n\n if(semideviation != 0):\n sortino_ratio = (annualised_return - annual_risk_free_interest_rate) / semideviation\n else:\n sortino_ratio = 0\n if(max_drawdown != 0):\n calmar_ratio = (annualised_return - annual_risk_free_interest_rate) / max_drawdown\n else:\n calmar_ratio = 0\n if(average_drawdown != 0):\n sterling_ratio = (annualised_return - annual_risk_free_interest_rate) / average_drawdown\n else:\n sterling_ratio = 0\n\n np_sortino_ratio = np.hstack((np_sortino_ratio, np.array([sortino_ratio])))\n np_calmar_ratio = np.hstack((np_calmar_ratio, np.array([calmar_ratio])))\n np_sterling_ratio = np.hstack((np_sterling_ratio, np.array([sterling_ratio])))\n\n\nnp_annualised_risk = np_annualised_risk[1:].reshape(1,len(tickers))\nnp_sharpe_ratio = np_sharpe_ratio[1:].reshape(1,len(tickers))\nnp_sortino_ratio = np_sortino_ratio[1:].reshape(1,len(tickers))\nnp_calmar_ratio = np_calmar_ratio[1:].reshape(1,len(tickers))\nnp_sterling_ratio = np_sterling_ratio[1:].reshape(1,len(tickers))\nstock_annualised_risk = pd.DataFrame(data = np_annualised_risk, columns = tickers, index=[\"Annualised risk\"])\nstock_sharpe_ratio = pd.DataFrame(data = np_sharpe_ratio, columns = tickers, index=[\"Sharpe ratio\"])\nstock_sortino_ratio = pd.DataFrame(data = np_sortino_ratio, columns = tickers, index=[\"Sortino ratio\"])\nstock_calmar_ratio = pd.DataFrame(data = np_calmar_ratio, columns = tickers, index=[\"Calmar ratio\"])\nstock_sterling_ratio = pd.DataFrame(data = np_sterling_ratio, columns = tickers, index=[\"Sterling ratio\"])\nstock_RiskAdjustedReturn_parameters = pd.concat([stock_RiskAdjustedReturn_parameters, stock_annualised_risk, stock_sharpe_ratio, stock_sortino_ratio, stock_calmar_ratio, stock_sterling_ratio], axis=0)\n\n\npd.set_option('display.max_columns', None)\nfaulty_holding_period_stock\nstock_RiskAdjustedReturn_parameters\n\n\nstock_RiskAdjustedReturn_parameters.to_csv(data_directory + 'stock_Parameters.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n\nprint(\"Sharpe Ratio: Greater the Sharpe ratio the greater the (risk–adjusted) return. 
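# %%
# Hedged sketch of the risk-adjusted ratios from the loop above for one return
# series. The 6% risk-free rate is an illustrative assumption standing in for
# annual_risk_free_interest_rate defined earlier in the notebook; annualisation
# here follows the conventional sqrt(252) scaling referenced in the comment above.
import math
import numpy as np

daily_returns = np.array([0.004, -0.012, 0.009, 0.002, -0.006, 0.011])  # made up
risk_free = 0.06

annualised_return = np.prod(1 + daily_returns) ** (252 / len(daily_returns)) - 1
annualised_risk = daily_returns.std(ddof=1) * math.sqrt(252)

downside = np.minimum(daily_returns, 0.0)
semideviation = math.sqrt((downside ** 2).sum() / len(daily_returns)) * math.sqrt(252)

wealth = np.cumprod(1 + daily_returns)
drawdown = wealth / np.maximum.accumulate(wealth) - 1
max_drawdown = -drawdown.min()          # positive, as in the loop above
average_drawdown = drawdown.mean()      # <= 0, as in stock_Drawdown

sharpe = (annualised_return - risk_free) / annualised_risk if annualised_risk else 0.0
sortino = (annualised_return - risk_free) / semideviation if semideviation else 0.0
calmar = (annualised_return - risk_free) / max_drawdown if max_drawdown else 0.0
sterling = (annualised_return - risk_free) / average_drawdown if average_drawdown else 0.0
print(round(sharpe, 3), round(sortino, 3), round(calmar, 3), round(sterling, 3))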
Usually speaking a Sharpe ratio of 1.0 or greater is considered to be good and essentially implies that for every unit of risk you are assuming you are achieving an equal amount of return. In short, the larger the Sharpe ratio the better. One thing to closely consider, is the risk free rate used in the calculation which can greatly affect the final number. \")\nprint(\"Sortino Ratio: Higher the Sortino ratio, the better. A Sortino ratio greater than 2 is consider to be good\")\nprint(\"Calmar Ratio: Higher the Calmar ratio the better. Anything over 0.50 is considered to be good. A Calmar ratio of 3.0 to 5.0 is really good.\")\nprint(\"Sterling Ratio: Higher the Sterling ratio the better, showing that the investor is earning a higher return relative to the risk\")\nprint(\"Treynor Ratio: Higher ratio indicates a more favorable risk/return scenario. Keep in mind that Treynor Ratio values are based on past performance that may not be repeated in future performance. For negative values of Beta, the Ratio does not give meaningful values. When comparing two portfolios, the Ratio does not indicate the significance of the difference of the values, as they are ordinal. For example, a Treynor Ratio of 0.5 is better than one of 0.25, but not necessarily twice as good. The numerator is the excess return to the risk-free rate. The denominator is the Beta of the portfolio, or, in other words, a measure of its systematic risk.\")\n\n\n# %%\ndef get_risk_adjusted_return_parameters():\n stock_RiskAdjustedReturn_parameters = pd.DataFrame()\n if (os.path.exists(data_directory + 'stock_Parameters.csv')):\n print('Reading from local file. Path:' + data_directory + 'stock_Parameters.csv')\n stock_RiskAdjustedReturn_parameters = pd.read_csv(data_directory + 'stock_Parameters.csv', delimiter=',', index_col=0 )\n else:\n print(\"File does not exist!!\")\n return stock_RiskAdjustedReturn_parameters\n\nstock_RiskAdjustedReturn_parameters = get_risk_adjusted_return_parameters()\nstock_RiskAdjustedReturn_parameters\n\ndf_all_stock_returns = None\nif (os.path.exists(data_directory + 'AllStockReturns.csv')):\n df_all_stock_returns = pd.read_csv(data_directory + 'AllStockReturns.csv', header = 0, delimiter=',', index_col=0 )\nelse:\n print(\"File does not exist!!\")\n\ndf_all_stock_returns.head(3)\ndf_all_stock_returns.tail(3)\n\ndf_all_stock_details = None\nif (os.path.exists(data_directory + 'AllStockDetails.csv')):\n df_all_stock_details = pd.read_csv(data_directory + 'AllStockDetails.csv', header = 0, delimiter=',', index_col=0 )\nelse:\n print(\"File does not exist!!\")\n\ndf_all_stock_details.head(3)\ndf_all_stock_details.tail(3)\n\ndt.datetime.now().time()\n\n\ntry:\n #df_adj_close_percent_change.head()\n if 'df_adj_close_percent_change' not in globals(): #globals() is a superset of locals()\n df_adj_close_percent_change = get_adj_close_price_percentage_change_of_all_stocks()\nexcept Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n print(\"df_adj_close_percent_change is not defined!\")\n\n\ndt.datetime.now().time()\ndf_adj_close_percent_change.tail(3)\n\ntry:\n #stock_RiskAdjustedReturn_parameters\n if 'stock_RiskAdjustedReturn_parameters' not in globals(): #globals() is a superset of locals()\n stock_RiskAdjustedReturn_parameters = get_risk_adjusted_return_parameters()\nexcept Exception as e:\n exc_type, exc_value, exc_tb = 
sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n print(\"stock_RiskAdjustedReturn_parameters is not defined!\")\n \ndt.datetime.now().time()\nstock_RiskAdjustedReturn_parameters\n\ntry:\n #stock_RiskAdjustedReturn_parameters\n if 'stock_Outperform' not in globals(): #globals() is a superset of locals()\n num_of_rows = len(df_adj_close_percent_change.index)\n \n np_NSE = df_adj_close_percent_change[['^NSEI']].to_numpy().reshape(num_of_rows, 1)\n np_NSE = np.tile(np_NSE, (1, len(tickers)))\n np_stock = df_adj_close_percent_change.to_numpy()\n \n stock_Outperform = calc_stock_outperform(np_NSE, np_stock)\nexcept Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n print(\"stock_Outperform is not defined!\")\n \n\n\n \n\n\n# %%\n\n\nlen(stock_RiskAdjustedReturn_parameters.columns)\nlen(df_all_stock_returns.T.columns)\nlen(df_adj_close_percent_change.columns)\nstock_RiskAdjustedReturn_parameters_transpose = stock_RiskAdjustedReturn_parameters.T\n\nbeta = list()\ntreynor_ratio = list()\njensens_alpha = list()\ntracking_error = list()\ninformation_ratio = list()\nidiosyncratic_volatility = list()\nappraisal_ratio = list()\nexpected_return = list()\n\nfor c in df_adj_close_percent_change.columns: #The first entry is ^NSEI\n df_cleaned_adj_close_percent_change = clean_dataset(valid_dataframe(df_adj_close_percent_change[['^NSEI', c]]))\n try:\n ann_return = float(stock_RiskAdjustedReturn_parameters[c][3])\n except ValueError:\n print(\"No Annualised return value for:\", c)\n ann_return = np.nan\n\n if(c == '^NSEI'):\n ann_return_benchmark = ann_return\n\n stock = np.array(df_cleaned_adj_close_percent_change[[c]])\n benchmark = np.array(df_cleaned_adj_close_percent_change[['^NSEI']])\n\n \n non_nan_column_values = stock[~np.isnan(stock)]\n #non_nan_indices_list = np.argwhere(~np.isnan(c))\n if (non_nan_column_values.size == 0): #All values in the cloumn are NaN\n print(\"The stock {} does not have any adjustment close price percentage data. 
Most likely it was just newly listed on the exchange\".format(c))\n continue\n\n reg_benchmark = LinearRegression().fit(benchmark.reshape(-1,1), benchmark.reshape(-1,1))\n reg_stock = LinearRegression().fit(benchmark.reshape(-1,1), stock.reshape(-1,1))\n #Calculate intercept coefficient (aka Alpha)\n A_benchmark = round(float(reg_benchmark.intercept_), 3)\n A_stock = round(float(reg_stock.intercept_), 3)\n #Calculate slope coefficient (aka Beta)\n B_benchmark = round(float(reg_benchmark.coef_), 1)\n B_stock = round(float(reg_stock.coef_), 1) \n # A beta greater than 1.0 implies the stock is more volatile and risky\n beta.append(B_stock) # https://www.educba.com/what-is-beta\n\n if(B_stock != 0):\n treynor_ratio.append((ann_return - annual_risk_free_interest_rate)/B_stock)\n else:\n treynor_ratio.append(0)\n ja = (ann_return - annual_risk_free_interest_rate) - B_stock * (ann_return_benchmark - annual_risk_free_interest_rate)\n jensens_alpha.append(ja)\n\n tr_error = stock_Outperform[c].dropna().std() * math.sqrt(12)\n tracking_error.append(tr_error)\n \n if (tr_error != 0):\n information_ratio.append((ann_return - ann_return_benchmark)/tr_error)\n else:\n information_ratio.append(0)\n i_volatility = stock_Abnormal[c].dropna().std() * math.sqrt(12)\n idiosyncratic_volatility.append(i_volatility)\n appraisal_ratio.append(ja / i_volatility)\n\n expt_return = annual_risk_free_interest_rate + (B_stock * equity_risk_premium) #equity_risk_premium: 8% - India Equity Risk Premium (2021) study: https://incwert.com/india-equity-risk-premium-2021/\n expected_return.append(expt_return) #equity_risk_premium: 8% - India Equity Risk Premium (2021) study: https://incwert.com/india-equity-risk-premium-2021/\n\n\ndt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\nstock_RiskAdjustedReturn_parameters_transpose[\"Expected return[Equity Risk Premium based]\"] = expected_return\nstock_RiskAdjustedReturn_parameters_transpose[\"Beta [Systematic Risk]\"] = beta\nstock_RiskAdjustedReturn_parameters_transpose[\"Treynor ratio\"] = treynor_ratio\nstock_RiskAdjustedReturn_parameters_transpose[\"Jensen's alpha\"] = jensens_alpha\nstock_RiskAdjustedReturn_parameters_transpose[\"Tracking error\"] = tracking_error\nstock_RiskAdjustedReturn_parameters_transpose[\"Information ratio\"] = information_ratio\nstock_RiskAdjustedReturn_parameters_transpose[\"Idiosyncratic volatility [Unsystematic Risk]\"] = idiosyncratic_volatility\nstock_RiskAdjustedReturn_parameters_transpose[\"Appraisal ratio\"] = appraisal_ratio\n\n\npd.set_option('display.max_columns', None)\n\n# 'Annualised return', 'Holding Period Years', 'Calmar ratio', 'Sterling ratio', 'Sortino ratio', 'Sharpe ratio', 'Max drawdown', 'Average drawdown', 'Annualised risk'\n\n#if (pegRatio is None and forward_eps is not None and earningsGrowth is not None and forward_eps != 0 and earningsGrowth != 0):\n# pegRatio = (current_price / forward_eps) / earningsGrowth\n# PEG=P/E / EPS Growth\n# where:\n# PEG=PEG ratio\n# P/E=Price-to-earnings ratio\n# EPS Growth=Annual earnings per share growth*\n# Although earnings growth rates can vary among different sectors, typically, a stock with a PEG of less than 1 is considered undervalued since its price is considered to be low compared to the company's expected earnings growth. A PEG greater than 1 might be considered overvalued since it might indicate the stock price is too high compared to the company's expected earnings growth. \n# the PEG ratio provides more insight into a stock's valuation. 
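# %%
# Sketch of the beta / Treynor / Jensen's-alpha / expected-return step above for
# a single stock, using made-up daily returns. risk_free and equity_risk_premium
# below are illustrative stand-ins for annual_risk_free_interest_rate and
# equity_risk_premium defined earlier in the notebook (the 8% premium follows the
# study cited in the comment above).
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
benchmark = rng.normal(0.0005, 0.010, 250)                    # stand-in for ^NSEI returns
stock = 0.0002 + 1.3 * benchmark + rng.normal(0, 0.005, 250)  # synthetic stock returns

reg = LinearRegression().fit(benchmark.reshape(-1, 1), stock)
beta = float(reg.coef_[0])                                    # slope = systematic risk

risk_free = 0.06
equity_risk_premium = 0.08
ann_return_stock = np.prod(1 + stock) ** (252 / len(stock)) - 1
ann_return_benchmark = np.prod(1 + benchmark) ** (252 / len(benchmark)) - 1

treynor = (ann_return_stock - risk_free) / beta if beta != 0 else 0.0
jensens_alpha = (ann_return_stock - risk_free) - beta * (ann_return_benchmark - risk_free)
expected_return = risk_free + beta * equity_risk_premium      # CAPM-style expected return
print(round(beta, 2), round(treynor, 3), round(jensens_alpha, 3), round(expected_return, 3))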
By providing a forward-looking perspective, the PEG is a valuable tool for investors in calculating a stock's future prospects. \n# Every investor wants an edge in predicting a company's future, but a company's earnings guidance statements may not be a reliable source. \n\n\nstk_details = df_all_stock_details[[\"marketCap\", \"sector\", \"industry\", \"revenuePerShare\", \"currentPrice\", \"forwardEps\", \"trailingPE\", \"52WeekChange\", \"fiftyDayAverage\", \"twoHundredDayAverage\", \"trailingAnnualDividendYield\", \"payoutRatio\", \"averageVolume10days\", \"regularMarketVolume\", \"dividendRate\", \"exDividendDate\", \"beta\", \"heldPercentInstitutions\", \"heldPercentInsiders\", \"earningsGrowth\", \"pegRatio\", \"bookValue\", \"enterpriseValue\", \"debtToEquity\", \"returnOnEquity\", \"profitMargins\", \"dividendYield\", \"longBusinessSummary\"]]\n\nstk_details.columns = [\"MarketCap\", \"Sector\", \"Industry\", \"RevenuePerShare\", \"CurrentPrice\", \"ForwardEPS\", \"TrailingPE(CurrentPrice/TrailingEPS) Lower better\", \"52WeekChange\", \"fiftyDayAverage\", \n\"TwoHundredDayAverage\", \"TrailingAnnualDividendYield\", \"PayoutRatio\", \"AverageVolume10days\", \"RegularMarketVolume\", \"DividendRate\", \"ExDividendDate\", \"Beta from YF\", \n\"HeldPercentInstitutions\", \"HeldPercentInsiders\", \"earningsGrowth\", \"pegRatio\", \"bookValue\", \"enterpriseValue\", \"debtToEquity(Ideally 0.3 or less)\", \n\"returnOnEquity(Ideally > 20%)\", \"profitMargins\", \"dividendYield\", \"longBusinessSummary\"]\n\ndt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\nall_ratios = pd.concat([stock_RiskAdjustedReturn_parameters_transpose.T, df_all_stock_returns.T, stk_details.T], axis=0)#.apply(pd.to_numeric, errors='ignore')\nall_ratios.head(10)\nall_ratios.to_csv(data_directory + 'all_ratios.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\ndt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n# %%\n\nif (os.path.exists(data_directory + 'all_ratios.csv')):\n all_ratios = pd.read_csv(data_directory + 'all_ratios.csv', header = 0, delimiter=',', index_col=0 )\nelse:\n print(\"File does not exist!!\")\n\ndt.datetime.now().time()\n\nsorted_all_ratios = all_ratios.T\nsorted_all_ratios = sorted_all_ratios[[\"1 DAY\", \"1 WEEK\", \"1 MONTH\", \"6 MONTHS\", \"1 YEAR\", \"2 YEAR\", \"3 YEAR\", \"5 YEAR\", \"MAX\", \"Annualised return\", \"Holding Period Years\", \"Sector\", \"Industry\", \"MarketCap\", \"enterpriseValue\", \"earningsGrowth\", \"pegRatio\", \"CurrentPrice\", \n\"Calmar ratio\", \"Sterling ratio\", \"Sortino ratio\", 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\", \n\"Max drawdown\", \"Average drawdown\", \"Expected return[Equity Risk Premium based]\", \"Annualised risk\", \"RevenuePerShare\", \"Beta [Systematic Risk]\", \n\"Idiosyncratic volatility [Unsystematic Risk]\", \"HeldPercentInsiders\", \"HeldPercentInstitutions\", \"bookValue\", \"debtToEquity(Ideally 0.3 or less)\", \n\"returnOnEquity(Ideally > 20%)\", \"profitMargins\", \"dividendYield\", \"ForwardEPS\", \"TrailingPE(CurrentPrice/TrailingEPS) Lower better\", \n\"52WeekChange\", \"fiftyDayAverage\", \"TwoHundredDayAverage\", \"TrailingAnnualDividendYield\", \"PayoutRatio\", \"AverageVolume10days\", \"RegularMarketVolume\", \"DividendRate\", \n\"ExDividendDate\", \"Beta from YF\", \"longBusinessSummary\"]]\n#sorted_all_ratios1 = sorted_all_ratios.sort_values(by=['Calmar ratio', 'Holding Period Years', 
'Sterling ratio', 'Sortino ratio', 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\", \"Idiosyncratic volatility [Unsystematic Risk]\"], ascending=False)\n#groupby_sector_sorted_all_ratios = sorted_all_ratios1.groupby([\"Sector\"], as_index=False)\n#groupby_sector_sorted_all_ratios.head(50)\n\ndt.datetime.now().time()\n\ngroupby_sector_sorted_all_ratios = sorted_all_ratios.groupby([\"Industry\"]).apply(lambda x: x.sort_values(by=['Calmar ratio', 'Holding Period Years', \"6 MONTHS\", \"1 YEAR\", \"Annualised return\", \"MarketCap\"], ascending=(False, False, False, False, False, False)))\ngroupby_sector_sorted_all_ratios.head(50)\ndt.datetime.now().time()\n\n\nsorted_all_ratios.to_csv(data_directory + 'sorted_all_ratios.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\ngroupby_sector_sorted_all_ratios.to_csv(data_directory + 'groupby_sector_sorted_all_ratios.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n\nprint(\"Sharpe Ratio: Greater the Sharpe ratio the greater the (risk–adjusted) return. Usually speaking a Sharpe ratio of 1.0 or greater is considered to be good and essentially implies that for every unit of risk you are assuming you are achieving an equal amount of return. In short, the larger the Sharpe ratio the better. One thing to closely consider, is the risk free rate used in the calculation which can greatly affect the final number. Sharpe Ratio Grading Thresholds: Less than 1: Bad, 1 – 1.99: Adequate/good, 2 – 2.99: Very good, Greater than 3: Excellent\")\nprint(\"Sortino Ratio: Higher the Sortino ratio, the better. A Sortino ratio greater than 2 is consider to be good\")\nprint(\"Calmar Ratio: Higher the Calmar ratio the better. Anything over 0.50 is considered to be good. A Calmar ratio of 3.0 to 5.0 is really good.\")\nprint(\"Sterling Ratio: Higher the Sterling ratio the better, showing that the investor is earning a higher return relative to the risk\")\nprint(\"Tracking error: The consistency of generating excess returns is measured by the tracking error.\")\nprint(\"Idiosyncratic Risk: Sometimes referred to as unsystematic risk, is the inherent risk involved in investing in a specific asset, such as a stock. Idiosyncratic risk is the risk that is particular to a specific investment – as opposed to risk that affects the entire market or an entire investment portfolio. It is the opposite of systemic risk, which affects all investments within a given asset class. Systemic risks include things such as changing interest rates or inflation. Idiosyncratic risks are rooted in individual companies (or individual investments). Investors can mitigate idiosyncratic risks by diversifying their investment portfolios.\")\nprint(\"Information Ratio: The information ratio measures the risk-adjusted returns of a financial asset or portfolio relative to a certain benchmark. This ratio aims to show excess returns relative to the benchmark, as well as the consistency in generating the excess returns.\")\nprint(\"Appraisal Ratio: An appraisal ratio is a ratio used to measure the quality of a fund manager's investment-picking ability. The ratio shows how many units of active return the manager is producing per unit of risk. 
This is achieved by comparing the fund's alpha, the amount of excess return the manager has earned over the benchmark of the fund, to the portfolio's unsystematic risk or residual standard deviation.\")\nprint(\"Jensen's alpha: Alpha is a measure of the performance of an investment as compared to a suitable benchmark index. An alpha of one (the baseline value is zero) shows that the return on the investment during a specified time frame outperformed the overall market average by 1%. A negative alpha number reflects an investment that is underperforming as compared to the market average.\")\nprint(\"Treynor Ratio: Higher ratio indicates a more favorable risk/return scenario. Keep in mind that Treynor Ratio values are based on past performance that may not be repeated in future performance. For negative values of Beta, the Ratio does not give meaningful values. When comparing two portfolios, the Ratio does not indicate the significance of the difference of the values, as they are ordinal. For example, a Treynor Ratio of 0.5 is better than one of 0.25, but not necessarily twice as good. The numerator is the excess return to the risk-free rate. The denominator is the Beta of the portfolio, or, in other words, a measure of its systematic risk.\")\nprint(\"\"\"Beta: The beta coefficient can be interpreted as follows:\nβ =1 exactly as volatile as the market\nβ >1 more volatile than the market\nβ <1>0 less volatile than the market\nβ =0 uncorrelated to the market\nβ <0 negatively correlated to the market\"\"\")\n\n\n# %%\ngroupby_sector_sorted_all_ratios = None\nif (os.path.exists(data_directory + 'groupby_sector_sorted_all_ratios.csv')):\n groupby_sector_sorted_all_ratios = pd.read_csv(data_directory + 'groupby_sector_sorted_all_ratios.csv', header = 0, delimiter=',', index_col=0 )\nelse:\n print(\"File does not exist!!\")\n\n\n# %%\n########################################################################################################################################\n######################################### DOWNLOAD LATEST GSM, ASM and pledge reports from NSE website #################################\n########################################################################################################################################\n#import urllib.request\n#with urllib.request.urlopen('https://www.nseindia.com/reports/gsm') as f:\n# html = f.read().decode('utf-8')\n\nimport glob\n\ndef get_pledge_list():\n # https://www.nseindia.com/companies-listing/corporate-filings-pledged-data\n # CF-SAST-Pledged-Data-28-Dec-2021.csv\n df_pledge = pd.DataFrame()\n pledgefile = glob.glob(data_directory + r'CF-SAST-Pledged-Data-*.csv')[0]\n if (pledgefile):\n print(\"Pledgefile:\", pledgefile)\n # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n df_pledge = pd.read_csv(pledgefile, sep=',', index_col=0, header=0)\n df_pledge = df_pledge[['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]', '(%) PLEDGE / DEMAT']]\n df_pledge.replace(r\"^ +| +$\", r\"\", regex=True, inplace=True)\n #df_pledge.rename(columns = {'PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]' : '(%) PROMOTER SHARES PLEDGE / TOTAL ISSUED SHARES - LAST QUARTER'}, inplace = True)\n else:\n print(\"File does not exist!!\")\n\n return df_pledge\n\n\ndef get_asm_list():\n data_directory\n longterm_index = np.nan\n shortterm_index = np.nan\n\n if (os.path.exists(data_directory + 'asm-latest.csv')):\n # pointing the header to row# 0 assigns column 
name information to be parsed from the top row.\n asm = pd.read_csv(data_directory + 'asm-latest.csv', sep=',', index_col=0)\n\n df_asm = pd.DataFrame(asm)\n\n count = 0\n new_index = []\n for i in df_asm.index:\n if(i == 'Long Term'):\n longterm_index = count\n elif(i == 'Short Term'):\n shortterm_index = count\n count = count+1\n\n longterm_asm = df_asm[longterm_index+1:shortterm_index]\n longterm_asm['SYMBOL \\n'] = [s + '.NS' for s in longterm_asm['SYMBOL \\n']]\n longterm_asm.rename(columns = {'SYMBOL \\n':'SYMBOL'}, inplace = True)\n longterm_asm.rename(columns = {'ASM STAGE \\n':'LT ASM STAGE'}, inplace = True)\n longterm_asm.set_index(longterm_asm['SYMBOL'], inplace=True)\n longterm_asm = longterm_asm[['LT ASM STAGE']]\n\n shortterm_asm = df_asm[shortterm_index+1:]\n shortterm_asm['SYMBOL \\n'] = [s + '.NS' for s in shortterm_asm['SYMBOL \\n']]\n shortterm_asm.rename(columns = {'SYMBOL \\n':'SYMBOL'}, inplace = True)\n shortterm_asm.rename(columns = {'ASM STAGE \\n':'ST ASM STAGE'}, inplace = True)\n shortterm_asm.set_index(shortterm_asm['SYMBOL'], inplace=True)\n shortterm_asm = shortterm_asm[['ST ASM STAGE']]\n\n else:\n print(\"File does not exist!!\")\n\n return longterm_asm, shortterm_asm\n\ndef get_gsm_list():\n if (os.path.exists(data_directory + 'gsm-latest.csv')):\n gsm = pd.read_csv(data_directory + 'gsm-latest.csv', sep = ',', index_col=0) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n\n df_gsm = pd.DataFrame(gsm)\n df_gsm['SYMBOL \\n'] = [s + '.NS' for s in df_gsm['SYMBOL \\n']]\n df_gsm.rename(columns = {'SYMBOL \\n':'SYMBOL'}, inplace = True)\n df_gsm.rename(columns = {'GSM STAGE \\n':'GSM STAGE'}, inplace = True)\n df_gsm.set_index(df_gsm['SYMBOL'], inplace=True)\n df_gsm = df_gsm[['GSM STAGE']]\n else:\n print(\"File does not exist!!\")\n\n return df_gsm\n\ngsm = get_gsm_list()\ngsm.head()\n\nlongterm_asm, shortterm_asm = get_asm_list()\nlongterm_asm.head()\nshortterm_asm.head()\n\npledge_list = get_pledge_list()\npledge_list.head()\n\n\n# %%\n############################## LATEST ############################## \n\nprint(\"No. of stocks in pledge list:\", pledge_list.shape[0])\npledge_list.head(3)\ndf_all_stock_details.head(3)\n\ndf_all_stock_details.index.name = \"stock\"\ndf_all_stock_details['longName_lower'] = df_all_stock_details['longName'].str.lower()\npledge_list.index = pledge_list.index.str.replace(r\"^ +| +$\", r\"\", regex=True)\npledge_list['stock_lower'] = pledge_list.index.str.lower()\npledge_list1 = pledge_list.merge(df_all_stock_details, left_on=\"stock_lower\", right_on=\"longName_lower\", how=\"inner\")\n\npledge_list1.head()\n\nprint(\"pledge list mapping to longName:\", pledge_list1.shape[0])\n\ndf_all_stock_details['shortName_lower'] = df_all_stock_details['shortName'].str.lower()\npledge_list2 = pledge_list.merge(df_all_stock_details, left_on=\"stock_lower\", right_on=\"shortName_lower\", how=\"inner\")\n\npledge_list2.head()\nprint(\"pledge list mapping to shortName:\", pledge_list2.shape[0])\n\npledge_list1.set_index('symbol', inplace=True)\npledge_list2.set_index('symbol', inplace=True)\ndf_pledge = pd.concat([ pledge_list1, pledge_list2 ]).drop_duplicates(keep='first')#.reset_index(inplace=True) #(drop=True)\nprint(\"pledge list mapping to shortName & longName:\", df_pledge.shape[0])\ndf_pledge.head(3)\n\npledge_lst_not_mapped = pd.DataFrame(pledge_list[~pledge_list['stock_lower'].isin(df_pledge['stock_lower'])])\n\nprint(\"No. 
of pledges NOT mapped:\", len(pledge_lst_not_mapped))\npledge_lst_not_mapped.head()\n\n\n# %%\n############################## LATEST ############################## \n\n\ndef associate_plege_list_with_stock_symbol(pledge_lst_not_mapped, df_all_stock_details):\n\n print(type(pledge_lst_not_mapped))\n print(len(pledge_lst_not_mapped))\n print(\"No. of total stocks:\", df_all_stock_details.shape[0])\n non_nan_shortName_df_all_stock_details = df_all_stock_details['shortName'].fillna('', axis=0)\n\n print(\"No. of stocks having Non NaN shortName:\", non_nan_shortName_df_all_stock_details.shape[0])\n non_nan_longName_df_all_stock_details = df_all_stock_details['longName'].fillna('', axis=0)\n\n print(\"No. of stocks having Non NaN longName:\", non_nan_longName_df_all_stock_details.shape[0])\n\n pledge_lst_not_mapped.shape\n pledge_lst_not_mapped.head()\n pledge_lst_not_mapped['pledge name'] = pledge_lst_not_mapped.index\n pledge_lst_not_mapped['symbol'] = [None] * len(pledge_lst_not_mapped.index)\n pledge_lst_not_mapped.head()\n lst_of_pledge_list = pledge_lst_not_mapped.index.str.split(\" \")\n mask_1 = pd.DataFrame()\n mask_2 = pd.DataFrame()\n\n for row_num, x in enumerate(lst_of_pledge_list):\n for i in range(len(x), 0, -1):\n try:\n # Checks for both Capital, small letter & etc cases\n short_lst = df_all_stock_details[non_nan_shortName_df_all_stock_details.str.contains(' '.join(x[0:i]), regex=False, case=False)]\n if len(short_lst) == 1:\n # Bank Of Baroda\t0.00\t1.50\tBank Of Baroda\tCENTRALBK.NS\n # Bank Of India\t0.00\t1.13\tBank Of India\tCENTRALBK.NS\n # Bank of Maharashtra\t0.00\t0.86\tBank of Maharashtra\tCENTRALBK.NS\n pledge_lst_not_mapped['symbol'][row_num] = short_lst.index.values[0]\n break\n elif len(short_lst) > 0:\n #print(\"> 1:\", x, i, ' '.join(x[0:i]), short_lst.index.values)\n pass\n\n except ValueError:\n print(\"{} seems to have NaN value\".format(' '.join(x[0:i])))\n break\n\n for row_num, x in enumerate(lst_of_pledge_list):\n for i in range(len(x), 0, -1):\n try:\n # Checks for both Capital, small letter & etc cases\n long_lst = df_all_stock_details[non_nan_longName_df_all_stock_details.str.contains(' '.join(x[0:i]), regex=False, case=False)]\n\n if (len(long_lst) == 1):\n if (pledge_lst_not_mapped['symbol'][row_num] != None and pledge_lst_not_mapped['symbol'][row_num] != long_lst.index.values[0]):\n print(\"pledge_lst_not_mapped['symbol'][{}]:\".format(row_num), pledge_lst_not_mapped['symbol'][row_num], \"long_lst.index.values[0]:\", long_lst.index.values[0])\n #pledge_lst_not_mapped['symbol'][row_num] = None\n elif (pledge_lst_not_mapped['symbol'][row_num] == None):\n pledge_lst_not_mapped['symbol'][row_num] = long_lst.index.values[0]\n break\n elif len(short_lst) > 0:\n #print(\"> 1:\", x, i, ' '.join(x[0:i]), short_lst.index.values)\n pass\n\n except ValueError:\n print(\"{} seems to have NaN value\".format(' '.join(x[0:i])))\n break\n\n return pledge_lst_not_mapped\n\npledge_list_partly_associated_with_symbol = associate_plege_list_with_stock_symbol(pledge_lst_not_mapped, df_all_stock_details)\nprint(\"pledge_list_partly_associated_with_symbol:\", len(pledge_list_partly_associated_with_symbol))\nnon_na_pledge_list_partly_associated_with_symbol = pledge_list_partly_associated_with_symbol[pledge_list_partly_associated_with_symbol['symbol'].notna()]\nprint(\"non_na_pledge_list_partly_associated_with_symbol:\", len(non_na_pledge_list_partly_associated_with_symbol))\nnon_na_pledge_list_partly_associated_with_symbol.head(3)\nna_pledge_list_partly_associated_with_symbol 
= pledge_list_partly_associated_with_symbol[pledge_list_partly_associated_with_symbol['symbol'].isna()]\nprint(\"na_pledge_list_partly_associated_with_symbol:\", len(na_pledge_list_partly_associated_with_symbol))\nna_pledge_list_partly_associated_with_symbol.head(3)\n\n#na_pledge_list_partly_associated_with_symbol\nprint(\"df_all_stock_details.shape:\", df_all_stock_details.shape)\ndf_all_stock_details_with_pledge = non_na_pledge_list_partly_associated_with_symbol.merge( df_all_stock_details, left_on=\"symbol\", right_on=\"symbol\", how=\"right\")\ndf_all_stock_details_with_pledge.set_index('symbol', inplace=True)\ndf_all_stock_details_with_pledge1 = df_all_stock_details_with_pledge.drop(['pledge name'], axis=1)\nprint(\"df_all_stock_details_with_pledge1.shape:\", df_all_stock_details_with_pledge1.shape)\ndf_all_stock_details_with_pledge1.head(3)\n\nnon_associated_pledge_list = []\nfor jkl in df_all_stock_details_with_pledge1.index:\n if jkl not in df_pledge.index:\n non_associated_pledge_list.append(jkl)\n#print(non_associated_pledge_list)\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n\ndf_all_stock_details_with_pledge1.drop_duplicates(keep='first').head()\n\ndf_pledge1 = pd.concat([df_pledge, df_all_stock_details_with_pledge1], axis=0).reset_index()\ndf_nsei = df_pledge1[df_pledge1['symbol'] == '^NSEI'].set_index('symbol').sort_index()\ndf_pledge4 = df_pledge1[df_pledge1['symbol'] != '^NSEI'].drop_duplicates(subset=['symbol'], keep='first').dropna(subset=['symbol']).set_index('symbol').sort_index()\ndf_pledge4 = pd.concat([df_nsei, df_pledge4], axis=0)\n\nprint(\"df_pledge4.shape:\", df_pledge4.shape)\ndf_pledge4.head()\ndf_pledge4.to_csv(data_directory + 'pledge_list_with_stock_details.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None)\n\n\nmissed_stocks = []\nfor jkl in df_all_stock_details.index:\n if jkl not in df_pledge4.index:\n missed_stocks.append(jkl)\nmissed_stocks\n\n\n# %%\ndef get_financial(stock):\n financial = pd.DataFrame()\n financial_revenue = pd.DataFrame()\n financial_net_income = pd.DataFrame()\n if (os.path.exists(data_directory + stock + '_financials.csv')):\n financial_yearly = pd.read_csv(data_directory + stock + '_financials.csv', sep = ',', index_col=0, header=0).sort_index(axis=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n financial_quarterly = pd.read_csv(data_directory + stock + '_quarterly_financials.csv', sep = ',', index_col=0, header=0).sort_index(axis=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n financial = pd.concat([financial_yearly, financial_quarterly], axis=1, keys=['yearly', 'quarterly'])\n financial = financial.T\n try:\n financial_revenue = financial[['Total Revenue']].T\n financial_net_income = financial[['Net Income']].T\n except KeyError:\n print(\"Oops!\", stock, sys.exc_info(), \"occurred.\")\n else:\n print(\"File does not exist!!\")\n\n return financial_revenue, financial_net_income\n\n@dataclass\nclass DataframesDict:\n param: Dict[str, pd.DataFrame] = field(default_factory = lambda: ({\"stock\": pd.DataFrame()}))\n#df_financials_ = DataframesDict() #pd.DataFrame()\n\ndef get_financial_for_all_stocks():\n df_financial_revenue = pd.DataFrame()\n df_financial_net_income = pd.DataFrame()\n for i in tickers: #['AFFLE.NS', 'MAZDOCK.NS', 'PIIND.NS', 'UTIAMC.NS', 'NDRAUTO.NS', 'ARIHANTCAP.NS', 'IIFLSEC.NS', 'DHARAMSI.NS', 'DEEPAKNTR.NS', 
'LATENTVIEW.NS', 'GLAND.NS', 'MEDICAMEQ.NS', 'SUVENPHAR.NS', 'RELAXO.NS', 'HAPPSTMNDS.NS', 'CAMS.NS', 'LTI.NS', 'AURUM.NS', 'INDIAMART.NS', 'NURECA.NS', 'UNIVPHOTO.NS', 'SBCL.NS', 'KIRLFER.NS', 'DEEPINDS.NS', 'LIKHITHA.NS', 'UNIDT.NS', 'BORORENEW.NS', 'IRCTC.NS', 'CLEAN.NS', 'ANURAS.NS', 'FAIRCHEMOR.NS', 'OAL.NS', 'PRINCEPIPE.NS', 'TATVA.NS', 'FINEORG.NS', 'AMIORG.NS', 'APCOTEXIND.NS', 'MTARTECH.NS', 'DCMNVL.NS', 'EASEMYTRIP.NS', 'ATGL.NS']:\n try:\n revenue, net_income = get_financial(i)\n revenue.index = [i]\n net_income.index = [i]\n df_financial_revenue = pd.concat([df_financial_revenue, revenue.divide(10000000)], axis=0, join='outer') #.divide(10000000) #Convert into crore\n df_financial_net_income = pd.concat([df_financial_net_income, net_income.divide(10000000)], axis=0, join='outer') #.divide(10000000) #Convert into crore\n except: # catch *all* exceptions\n exc_type, exc_value, exc_tb = sys.exc_info()\n print(\"stock:\", i, sys.exc_info(), \"occurred.\")\n \n return df_financial_revenue, df_financial_net_income\n\n\ndf_financial_revenue, df_financial_net_income = get_financial_for_all_stocks()\ndf_financial_revenue.index.name = 'stock'\ndf_financial_net_income.index.name = 'stock'\n\nnon_zero_df_financial_revenue = df_financial_revenue.replace(0,0.00001)\ndf_net_profit_margin = (df_financial_net_income.truediv(non_zero_df_financial_revenue)).multiply(100).round(5)\n\n\ndf_financial_revenue.to_csv(data_directory + 'financial_revenue_yearly_quarterly.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None)\ndf_financial_net_income.to_csv(data_directory + 'financial_net_income_yearly_quarterly.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None)\ndf_net_profit_margin.to_csv(data_directory + 'financial_net_profit_margin_yearly_quarterly.csv', sep=',', na_rep = 'N/A', header = True, encoding = 'utf-8', compression = None)\n\n\n# %%\ndf_financial_revenue['quarterly'].head()\n\ndf_financial_revenue['quarterly', '2020-12-31']\n\n\n# %%\n\nif (os.path.exists(data_directory + 'groupby_sector_sorted_all_ratios.csv')):\n #print('Reading from local file. 
Path:' + data_directory + i + '_info.csv')\n groupby_sector_sorted_all_ratios = pd.read_csv(data_directory + 'groupby_sector_sorted_all_ratios.csv', sep = ',', index_col=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\nelse:\n print(\"File does not exist!!\")\n\ngroupby_sector_sorted_all_ratios = groupby_sector_sorted_all_ratios[~groupby_sector_sorted_all_ratios.index.duplicated(keep='first')]\n\ngroupby_sector_sorted_all_ratios.head(5)\n\ndf_groupby_sector_sorted_all_ratios = pd.DataFrame(groupby_sector_sorted_all_ratios)\ndf_groupby_sector_sorted_all_ratios.index.name = 'stock'\n\nresult = df_groupby_sector_sorted_all_ratios.join(shortterm_asm).join(longterm_asm).join(gsm).join(df_pledge4[['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]', '(%) PLEDGE / DEMAT']]).join(df_net_profit_margin)\nresult.index.name = 'stock'\nresult.head()\n\nresult[\"Quarterly Average net_profit_margin\"] = result.iloc[:, 60:108].mean(axis=1)\nresult[\"Years Average net_profit_margin\"] = result.iloc[:, 109:132].mean(axis=1)\n\ndf_net_profit_margin.columns.values.tolist()\n\n#result = pd.concat([result, df_financial_net_income], axis=1, join=\"inner\")\n#result = result.join(df_financial_net_income)#.join(df_net_profit_margin)\nresult['50DMA>200DMA'] = np.where(result['fiftyDayAverage'] > result['TwoHundredDayAverage'], 'True', 'False')\n\ngroupby_sector_sorted_all_ratios = result.groupby([\"Industry\"]).apply(lambda x: x.sort_values(by=['Calmar ratio', 'Holding Period Years', 'Sterling ratio', 'Sortino ratio', 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\"], ascending=False))\ngroupby_sector_sorted_all_ratios = groupby_sector_sorted_all_ratios[[\"Sector\", \"Industry\", \"1 DAY\", \"1 WEEK\", \"1 MONTH\", \"6 MONTHS\", \"1 YEAR\", \"2 YEAR\", \"3 YEAR\", \"5 YEAR\", \"MAX\", \"Annualised return\", \"Holding Period Years\", \"GSM STAGE\", \"LT ASM STAGE\", \"ST ASM STAGE\", \"MarketCap\", \"enterpriseValue\", \"earningsGrowth\", \"pegRatio\", \"CurrentPrice\", \n\"Calmar ratio\", \"Sterling ratio\", \"Sortino ratio\", 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\", \n\"Max drawdown\", \"Average drawdown\", \"Expected return[Equity Risk Premium based]\", \"Annualised risk\", \"RevenuePerShare\", \"Beta [Systematic Risk]\", \n\"Idiosyncratic volatility [Unsystematic Risk]\", \"HeldPercentInsiders\", \"HeldPercentInstitutions\", 'PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]', \n'(%) PLEDGE / DEMAT', \"bookValue\", \"debtToEquity(Ideally 0.3 or less)\",\n\"returnOnEquity(Ideally > 20%)\", \"profitMargins\", \"dividendYield\", \"ForwardEPS\", \"TrailingPE(CurrentPrice/TrailingEPS) Lower better\", \n\"52WeekChange\", \"fiftyDayAverage\", \"TwoHundredDayAverage\", \"50DMA>200DMA\", \"TrailingAnnualDividendYield\", \"PayoutRatio\", \"AverageVolume10days\", \"RegularMarketVolume\", \"DividendRate\", \n\"ExDividendDate\", \"Beta from YF\", \"longBusinessSummary\"] + df_net_profit_margin.columns.values.tolist() + [\"Quarterly Average net_profit_margin\", \"Years Average net_profit_margin\"]]\n\ngroupby_sector_sorted_all_ratios = groupby_sector_sorted_all_ratios[~groupby_sector_sorted_all_ratios.index.duplicated(keep='first')]\n\ngroupby_sector_sorted_all_ratios.to_csv(data_directory + 'gsm_asm_groupby_sector_sorted_all_ratios.csv', sep=',', na_rep = 'N/A', header = True, 
encoding = 'utf-8', compression = None, date_format = '%Y-%m-%d')\n\n\n# %%\nif (os.path.exists(data_directory + 'gsm_asm_groupby_sector_sorted_all_ratios.csv')):\n groupby_sector_sorted_all_ratios = pd.read_csv(data_directory + 'gsm_asm_groupby_sector_sorted_all_ratios.csv', sep = ',', index_col=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\nelse:\n print(\"File does not exist!!\")\n\n\n# %%\ndef display_scatter_graph(data, x_param, y_param, colour_param, circle_size_param):\n #####################################################################\n ############ plt.scatter NOT EFFICIENT FOR large dataset ############ \n #####################################################################\n rets = data #.dropna()#.sort_values(by=[\"Average drawdown\", 'Max drawdown'])\n\n labels = rets.index\n\n rng = np.random.RandomState(0)\n colors = rng.rand(100)\n title = y_param + \" vs \" + x_param + \" with \" + circle_size_param + \" (Size of circles) & \" + colour_param + \"(Colour of circles)\"\n fig=plt.figure(figsize=(30,20))\n plt.xlabel(x_param, fontsize=15)\n plt.ylabel(y_param, fontsize=15)\n plt.title(title, fontsize=30)\n plt.subplots_adjust(bottom = 0.1)\n colors[:len(rets.index)]\n plt.scatter(\n # data[:, 0], data[:, 1], marker='o', c=data[:, 2], s=data[:, 3] * 1500,\n #rets[\"Annualised return\"], rets[\"Annualised risk\"], marker='o', c=colors[:len(rets.index)], alpha=0.8, s=rets[\"Calmar ratio\"]*50, cmap='viridis')\n rets[x_param], rets[y_param], marker='o', c=rets[colour_param], alpha=0.4, s=rets[circle_size_param]*300, cmap='viridis')\n # cmap=plt.get_cmap('Spectral'))\n plt.colorbar(); # show color scale\n\n for label, x, y in zip(labels, rets[x_param], rets[y_param]):\n # plt.annotate(\n # label,\n # xy=(x, y), xytext=(-20, 20),\n # textcoords='offset points', ha='right', va='bottom',\n # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n # arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n plt.annotate(\n label,\n xy=(x, y), xytext=(random.randint(-150, 150), random.randint(-150, 150)),\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n plt.show()\n\n #fig.legend(labels=[\"Max drawdown [Lesser is better]\", \"Average drawdown [Lesser is better]\", \"Annualised return\"], loc='center' )\n plt.savefig(data_directory + title + \".svg\")\n\n\ndata_directory\nfile = 'gsm_asm_groupby_sector_sorted_all_ratios.csv'\ngsm_asm_groupby_sector_sorted_all_ratios = pd.DataFrame()\n\nif (os.path.exists(data_directory + file)):\n gsm_asm_groupby_sector_sorted_all_ratios = pd.read_csv(data_directory + file, sep = ',', index_col=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\nelse:\n print(\"File does not exist!!\")\n\npd.set_option('display.max_columns', 500)\n#gsm_asm_groupby_sector_sorted_all_ratios.head()\n\nresult = gsm_asm_groupby_sector_sorted_all_ratios\n\ngood = result.loc[(result[\"1 YEAR\"] >= 50) & (result[\"Annualised return\"] >= 0.3) & (result['Calmar ratio'] >= 0.5) & \n(result['Holding Period Years'] >= 1.0) & (result['MarketCap'] >= 1000000000) & \n(result['debtToEquity(Ideally 0.3 or less)'] <= 20) & (result['HeldPercentInsiders'] >= 0.20) & (result['profitMargins'] > 0.08) &\n(np.logical_or(result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'] == 0, \nresult['PROMOTER SHARES 
ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'].isna())) & \n(np.logical_or(result['(%) PLEDGE / DEMAT'] <= 10, result['(%) PLEDGE / DEMAT'].isna())) &\n(result['50DMA>200DMA'] == True) & (result[\"Quarterly Average net_profit_margin\"] >= 10 ) & ( result[\"Years Average net_profit_margin\"] >= 8)]\ngood\n# DMA 50 > DMA 200 AND DMA 50 previous day < DMA 200 previous day\n#(result['Deviation from Ind PE'] <= 250) & (result['Deviation from Ind PB'] <= 250) ]\n#good\nlen(good.index)\n\n\ndisplay_scatter_graph(good, \"Annualised return\", \"Annualised risk\", \"Holding Period Years\", \"Calmar ratio\")\n\ndisplay_scatter_graph(good, \"Annualised return\", \"Annualised risk\", \"profitMargins\", \"earningsGrowth\")\n\n\n# %%\ndef display_annotated_stock_price(df_all_stock_price, title):\n fig, ax = plt.subplots(1, 1, figsize=(30, 20))\n ax.plot(df_all_stock_price.index ,df_all_stock_price)\n\n no_of_rows, no_of_columns = df_all_stock_price.shape\n plt.legend(labels=df_all_stock_price.columns)\n plt.title(title, fontsize=30)\n\n for i in df_all_stock_price.dropna().head(1).columns:\n df_cln = clean_dataset(pd.DataFrame(df_all_stock_price[i]))\n txt = i + \" \" + str((df_cln.head(1).values[0]).round(2))\n ax.annotate(xy=(df_cln.head(1).index, df_cln.head(1)[i]), xytext=(1,0), textcoords='offset points', text=txt, va='center')\n\n for i in df_all_stock_price.dropna().tail(1).columns:\n txt = i + \" \" + str((df_all_stock_price[i].dropna().tail(1).values[0]).round(2))\n ax.annotate(xy=(df_all_stock_price.dropna().tail(1).index, df_all_stock_price.dropna().tail(1)[i]), xytext=(5,0), textcoords='offset points', text=txt, va='center')\n\n return\n\ndisplay_annotated_stock_price(df_all_stock_price[good.index], 'Stock Price vs Time')\n\n\n# %%\ndef normalize_data(df):\n # df on input should contain only one column with the price data (plus dataframe index)\n min = df.min()\n max = df.max()\n x = df \n \n # time series normalization part\n # y will be a column in a dataframe\n y = (x - min) / (max - min)\n return y\n\ndf_all_stock_price_normalized = normalize_data(df_all_stock_price[good.index]).dropna()\nno_of_rows, no_of_columns = df_all_stock_price_normalized.shape\ndf_all_stock_price_normalized_wo_index = df_all_stock_price_normalized.reset_index(inplace = False, drop = True)\n\nlast_day = df_all_stock_price_normalized_wo_index.tail(1)\nlast_day\nx = last_day.to_dict('r')\n\ndct = {k: v for k, v in sorted(x[0].items(), key=lambda item: item[1], reverse=True)}\nsorted_stock = list(dct.keys())\n\n#df_all_stock_price_normalized.head()\ndf_all_stock_price_normalized_sorted = df_all_stock_price_normalized[sorted_stock]\ndf_all_stock_price_normalized_sorted.tail(1).T\n\nno_of_stocks_per_graph = 3\nstart = 0\nfor j in np.arange(no_of_stocks_per_graph,len(sorted_stock),no_of_stocks_per_graph):\n t = df_all_stock_price_normalized_sorted.T[start:j].T\n display_annotated_stock_price(t, 'Normalized Stock Price vs Time: ' + ', '.join(pd.Series(t[start:j].columns.values).to_list()))\n start = j\n\ndisplay_annotated_stock_price(df_all_stock_price_normalized_sorted, 'Normalized Stock Price vs Time')\n\n\n# %%\n#sorted_all_ratios = sorted_all_ratios[[\"Annualised return\", \"Expected return[Equity Risk Premium based]\", \"Annualised risk\", \"Calmar ratio\", \"Sterling ratio\", \"Sortino ratio\", 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\", \"Max drawdown\", \"Average drawdown\", \"Beta [Systematic Risk]\", \"Idiosyncratic 
volatility [Unsystematic Risk]\", \"Semideviation\"]]\n#sorted_all_ratios = sorted_all_ratios.sort_values(by=['Calmar ratio', 'Sterling ratio', 'Sortino ratio', 'Sharpe ratio', 'Treynor ratio', \"Jensen's alpha\", \"Information ratio\", \"Appraisal ratio\", \"Tracking error\", \"Idiosyncratic volatility [Unsystematic Risk]\"], ascending=False)\n\n# too_good = result.loc[(result[\"Annualised return\"] >= 0.3) & (result['Calmar ratio'] >= 1.0) & \n# (result['Holding Period Years'] >= 1.0) & (result['MarketCap'] >= 1000000000) & \n# (result['debtToEquity(Ideally 0.3 or less)'] <= 40) & (result['HeldPercentInsiders'] >= 0.45) & (result['profitMargins'] > 0.1) &\n# (np.logical_or(result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'] == 0, \n# result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'].isna())) & \n# (np.logical_or(result['(%) PLEDGE / DEMAT'] <= 10, result['(%) PLEDGE / DEMAT'].isna())) &\n# (result['50DMA>200DMA'] == True)]\n\n\n# too_good = result.loc[(result[\"Annualised return\"] >= 0.3) & (result['Calmar ratio'] >= 0.8) & \n# (result['Holding Period Years'] >= 1.0) & (result['MarketCap'] >= 1000000000) & \n# (result['debtToEquity(Ideally 0.3 or less)'] <= 20) & (result['HeldPercentInsiders'] >= 0.30) & (result['profitMargins'] > 0.08) &\n# (np.logical_or(result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'] == 0, \n# result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'].isna())) & \n# (np.logical_or(result['(%) PLEDGE / DEMAT'] <= 10, result['(%) PLEDGE / DEMAT'].isna())) &\n# (result['50DMA>200DMA'] == True)]\n\ntoo_good = result.loc[(result[\"1 YEAR\"] >= 50) & (result[\"Annualised return\"] >= 0.3) & (result['Calmar ratio'] >= 0.5) & \n(result['Holding Period Years'] >= 1.0) & (result['MarketCap'] >= 1000000000) & \n(result['debtToEquity(Ideally 0.3 or less)'] <= 20) & (result['HeldPercentInsiders'] >= 0.20) & (result['profitMargins'] > 0.08) &\n(np.logical_or(result['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'] == 0, \nresult['PROMOTER SHARES ENCUMBERED AS OF LAST QUARTER % OF TOTAL SHARES [X/(A+B+C)]'].isna())) & \n(np.logical_or(result['(%) PLEDGE / DEMAT'] <= 10, result['(%) PLEDGE / DEMAT'].isna())) &\n(result['50DMA>200DMA'] == True) & (result[\"Quarterly Average net_profit_margin\"] >= 10 ) & ( result[\"Years Average net_profit_margin\"] >= 8)]\n\n#(result['Deviation from Ind PE'] <= 250) & (result['Deviation from Ind PB'] <= 250) ]\n\n#too_good = result.loc[(result[\"Annualised return\"] >= 0.5) & (result['Calmar ratio'] >= 3.0) & (result['Sharpe ratio'] >= 3.0) & (result['Holding Period Years'] >= 1.0) & (result['MarketCap'] >= 1000000000)]\ntoo_good\n\n\n# %%\n# xtick_parm1, xtick_parm2 are columns names within dataframe data\ndef display_bar_graph_two_parameters(data, xtick_parm1, xtick_parm2, xlabel, ylabel, title):\n xticks = data.columns\n\n fig=plt.figure(figsize=(24,16))\n ax=fig.add_axes([0,0,1,1])\n plt.xlabel(xlabel, fontsize=15)\n plt.ylabel(ylabel, fontsize=15)\n plt.title(title, fontsize=30)\n plt.xticks(range(len(xticks)), xticks, rotation='vertical', fontsize=15)\n #plt.scatter(range(len(rets.columns)), rets.loc[\"Calmar Ratio\", : ], s=area)\n\n ax.bar(range(len(xticks)), data.loc[xtick_parm1], color='r', alpha=0.5)\n ax.bar(range(len(xticks)), data.loc[xtick_parm2], color='b', alpha=0.5)\n #ax.bar(range(len(rets.columns)), np.clip(rets.loc[\"Sterling Ratio\", : ], -0.5, 1.5), color='b')\n\n # 
Annotate Text\n for i, ar in enumerate(data.loc[xtick_parm1]):\n if (ar > 0):\n ax.text(i-0.125, ar + (ar * 0.05), round(ar, 1), verticalalignment='center', fontsize=15)\n elif (ar < 0):\n ax.text(i-0.125, ar - (ar * 0.05), round(ar, 1), verticalalignment='center', fontsize=15)\n else:\n ax.text(i-0.125, ar, round(ar, 2), verticalalignment='center', fontsize=15)\n\n\n for i, ar in enumerate(data.loc[xtick_parm2]):\n if (ar > 0):\n ax.text(i-0.125, ar + (ar * 0.05), round(ar, 1), verticalalignment='center', fontsize=15)\n elif (ar < 0):\n ax.text(i-0.125, ar - (ar * 0.05), round(ar, 1), verticalalignment='center', fontsize=15)\n else:\n ax.text(i-0.125, ar, round(ar, 2), verticalalignment='center', fontsize=15)\n\n #p1 = patches.Rectangle((.57, -0.005), width=.33, height=.13, alpha=.1, angle=0.0, facecolor='red', label=\"red\", transform=fig.transFigure)\n #p2 = patches.Rectangle((.124,-0.005), width=.446, height=.13, alpha=.1, angle=0.0, facecolor='green', label=\"green\", transform=fig.transFigure)\n #fig.add_artist(p1)\n #fig.add_artist(p2)\n\n ax.legend(labels=[xtick_parm1, xtick_parm2])\n plt.savefig(data_directory + title + \".svg\")\n\n return\n\ntitle = \"Stock vs. MarketCap & Enterprise Value\"\n\n#rets = sorted_all_ratios.dropna().sort_values(by=['Annualised return'], ascending=False)\nrets = too_good[['MarketCap', 'enterpriseValue']].sort_values(by=['MarketCap'], ascending=False)\nrets = np.divide(rets, 10000000)\nrets = rets.T\nrets\n\ndisplay_bar_graph_two_parameters(rets, 'MarketCap', 'enterpriseValue', \"Stock\", \"Value in ₹ cr.\", title)\n\n\n# %%\nimport seaborn as sns\n\ndef get_financial(stock):\n financial = pd.DataFrame()\n if (os.path.exists(data_directory + stock + '_financials.csv')):\n financial_yearly = pd.read_csv(data_directory + stock + '_financials.csv', sep = ',', index_col=0, header=0).sort_index(axis=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n financial_quarterly = pd.read_csv(data_directory + stock + '_quarterly_financials.csv', sep = ',', index_col=0, header=0).sort_index(axis=1) # pointing the header to row# 0 assigns column name information to be parsed from the top row.\n financial = pd.concat([financial_yearly, financial_quarterly], axis=1)\n financial = financial.T\n\n financial.rename(columns = {'Total Revenue':'Revenue'}, inplace = True)\n financial.rename(columns = {'Net Income':'Net Income (Profit)'}, inplace = True)\n try:\n financial = financial[['Revenue', 'Net Income (Profit)']].T\n except KeyError:\n print(\"Oops!\", sys.exc_info(), \"occurred.\")\n else:\n print(\"File does not exist!!\")\n \n return financial\n\n\ntitle = \"Total Revenue & Net Income (Profit)\"\n\nfor i in ['ADANITRANS.NS']: #too_good.index[:1]:\n financial = get_financial(i).divide(10000000) #Convert into crore\n\n display_bar_graph_two_parameters(financial, 'Revenue', 'Net Income (Profit)', 'Period (Year / Quarter)', 'All values in ₹ cr.', title + \" \" + i)\n\n financial = financial.T.reset_index()\n financial.columns = [\"Date\", \"Revenue\", \"Net Income (Profit)\"]\n sns.catplot(x='Date', y='Revenue', hue='Revenue', data=financial, kind='bar', height=6, aspect=13/6, legend=True, palette='hls')\n sns.catplot(x='Date', y='Net Income (Profit)', hue='Net Income (Profit)', data=financial, kind='bar', height=6, aspect=13/6, legend=True, palette='hls')\n\n\n# %%\n\ndef display_multigrid_bar_graph(list_of_stocks, num_of_columns_in_subplot):\n length = len(list_of_stocks)\n length\n\n num_of_columns_in_subplot\n 
num_of_rows_in_subplot = math.ceil(length / num_of_columns_in_subplot)\n num_of_rows_in_subplot\n\n fig, ax_lst = plt.subplots(num_of_rows_in_subplot, num_of_columns_in_subplot) # a figure with a 2x2 grid of Axes\n fig.set_figheight(6 * 5)\n fig.set_figwidth(4 * 5)\n\n fig.suptitle('Total Revenue, Net Income (Profit) & Net Income (Profit) as % of Revenue vs Date', fontsize=20, y=0.99 ) # add a overall title\n\n for i, v in enumerate(list_of_stocks):\n print(i, v)\n r= int(i/num_of_columns_in_subplot)\n ax_lst[r,i-(num_of_columns_in_subplot*r)].set_title(list_of_stocks[i])\n try:\n financial = get_financial(v).divide(10000000).T.reset_index() #Convert into crore\n percent_profit_of_revenue = ((financial['Net Income (Profit)'] / financial['Revenue']) * 100).round(2)\n #percent_profit_of_revenue\n financial = pd.concat([financial, percent_profit_of_revenue], axis=1)\n financial.columns = [\"Date\", \"Revenue\", \"Net Income (Profit)\", \"Profit as % of revenue\"]\n ax_lst[r,i-(num_of_columns_in_subplot*r)].bar(range(len(financial['Revenue'])), financial['Revenue'], color='r', alpha=0.5)\n ax_lst[r,i-(num_of_columns_in_subplot*r)].bar(range(len(financial['Net Income (Profit)'])), financial['Net Income (Profit)'], color='b', alpha=0.5)\n #g = sns.catplot(x='Date', y='Revenue', hue='Revenue', data=f, kind='bar', legend=True, palette='hls', ax=ax_lst[r,i-(num_of_columns_in_subplot*r)]) # pass ax\n\n ax_lst[r,i-(num_of_columns_in_subplot*r)].set_xticks(range(len(financial['Date'])))\n ax_lst[r,i-(num_of_columns_in_subplot*r)].set_xticklabels(financial['Date'], rotation='vertical', fontsize=10)\n ax_lst[r,i-(num_of_columns_in_subplot*r)].set(xlabel='Period (Year / Quarter)', ylabel='Total Revenue / Net Income(Profit) in ₹ cr.')\n\n # Annotate Text\n for y, profit_percentage in enumerate(financial['Net Income (Profit)']):\n ax_lst[r,i-(num_of_columns_in_subplot*r)].text(y-0.2, financial['Net Income (Profit)'][y] * 1.20, financial[\"Profit as % of revenue\"][y], verticalalignment='center', fontsize=8)\n except KeyError:\n print(\"Oops!\", sys.exc_info(), \"occurred.\")\n except Exception as e:\n exc_type, exc_value, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"Oops!\", exc_type, exc_value, fname, exc_tb.tb_lineno, \"occurred\", traceback.format_exc())\n plt.tight_layout()\n\n\n\n\ndisplay_multigrid_bar_graph(too_good.index, 3)\n\n\n# %%\n\n\ndef add_annotate_text(y_axis_values, ax, lower_clip_limit=0, upper_clip_limit=100000):\n # # Annotate Text\n for i, ar in enumerate(y_axis_values):\n if (ar > 0.0):\n if (ar > upper_clip_limit):\n ax.text(i-0.25, upper_clip_limit, round(ar, 1), verticalalignment='center')\n else:\n ax.text(i-0.25, ar+0.05, round(ar, 1), verticalalignment='center')\n elif (ar < 0.0):\n if (ar > lower_clip_limit):\n ax.text(i-0.25, lower_clip_limit, round(ar, 2), verticalalignment='center')\n else:\n ax.text(i-0.25, ar-0.05, round(ar, 2), verticalalignment='center')\n else:\n ax.text(i-0.25, ar, round(ar, 2), verticalalignment='center')\n\n\ndef display_overlapping_multi_bar_graph(rets):\n title = \"Stock vs Annualised return and Drawdowns\"\n\n fig=plt.figure(figsize=(12,8))\n ax=fig.add_axes([0,0,1,1])\n plt.xlabel(\"Scrip\", fontsize=15)\n plt.ylabel(\"Return\", fontsize=15)\n plt.title(title, fontsize=20)\n plt.xticks(range(len(rets.columns)), rets.columns, rotation='vertical')\n\n ax.bar(range(len(rets.columns)), rets.loc[\"Max drawdown\", : ], color='aqua', alpha=0.5)\n add_annotate_text(rets.loc[\"Max drawdown\", : ], ax)\n 
ax.bar(range(len(rets.columns)), rets.loc[\"Average drawdown\", : ], color='magenta', alpha=0.5)\n add_annotate_text(rets.loc[\"Average drawdown\", : ], ax)\n lower_clip_limit = -0.5\n upper_clip_limit = 1.5\n ax.bar(range(len(rets.columns)), np.clip(rets.loc[\"Annualised return\", : ], lower_clip_limit, upper_clip_limit), color='gold', alpha=0.5)\n add_annotate_text(rets.loc['Annualised return', : ], ax, lower_clip_limit, upper_clip_limit)\n\n\n ax.legend(labels=[\"Max drawdown [Lesser is better]\", \"Average drawdown [Lesser is better]\", \"Annualised return\"], loc='center' )\n plt.savefig(data_directory + title + \".svg\")\n\n\nrets = too_good[['Annualised return', 'Average drawdown', 'Max drawdown']].dropna().sort_values(by=[\"Average drawdown\", 'Max drawdown'])\nrets = rets.T\ndisplay_overlapping_multi_bar_graph(rets)\n\n\n# %%\n#color='rgbycmk'\n#ax.set_color_cycle(colors)\n\ndef display_hortzontal_subplots(rets):\n # using subplot function and creating plot one\n # row 2, column 1, count 1\n fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=False, gridspec_kw={'height_ratios':[3,2]}, figsize=(24,20))\n fig.suptitle(\"Stock vs Annualised return and Dropdown\", fontsize=30)\n #ax[0].plot(x, y, color='red')\n #ax[1].plot(x, y, color='blue')\n plt.gca().set_xlabel('Stock', fontsize=15)\n ax[0].set_xticks(range(len(rets.columns)))\n ax[0].set_xticklabels(rets.columns, rotation='vertical')\n ax[0].title.set_text('Stock vs Max drawdown and Average drawdown')\n ax[0].set_xlabel('Stock', fontsize=15)\n ax[0].set_ylabel('Max drawdown and Average drawdown', fontsize=15)\n ax[0].bar(range(len(rets.columns)), rets.loc[\"Max drawdown\", : ], color='aqua', alpha=0.5)\n ax[0].bar(range(len(rets.columns)), rets.loc[\"Average drawdown\", : ], color='magenta', alpha=0.5)\n\n ax[1].set_xticks(range(len(rets.columns)))\n ax[1].set_xticklabels(rets.columns, rotation='vertical')\n ax[1].title.set_text('Stock vs Annualised return')\n ax[1].set_xlabel('Stock', fontsize=15)\n ax[1].set_ylabel('Annualised return', fontsize=15)\n ax[1].bar(range(len(rets.columns)), np.clip(rets.loc[\"Annualised return\", : ],-100, 150 ), color='gold', alpha=0.5)\n\n plt.subplots_adjust(left=1,\n bottom=0.8, \n right=2, \n top=1, \n wspace=1,\n hspace=1)\n fig.tight_layout()\n \n # show plot\n plt.show()\n\nrets = too_good.sort_values(by=[\"Average drawdown\", 'Max drawdown'])\nrets.head()\nrets = rets.T\ndisplay_hortzontal_subplots(rets)\n\n\n# %%\n#index = [\"Annualised return\", \"Expected return\", \"Annualised risk\", \"Sharpe ratio\", \"Beta [Systematic Risk]\", \n# \"Treynor ratio\", \"Jensen's alpha\", \"Tracking error\", \"Information ratio\", \"Idiosyncratic volatility [Unsystematic Risk]\", \"Appraisal ratio\", \n# \"Semideviation\", \"Max drawdown\", \"Average drawdown\", \"Sortino ratio\", \"Calmar ratio\", \"Sterling ratio\"]\n\n\ndef display_multibar_graph(rets, title):\n bar_width=0.3\n\n X = np.arange(len(rets.index.values))\n fig=plt.figure(figsize=(30,20))\n ax=fig.add_axes([0,0,1,1])\n plt.xlabel(\"Stock\", fontsize=15)\n plt.ylabel(\"Risks\", fontsize=15)\n plt.title(title, fontsize=20)\n plt.xticks(X+range(len(rets.index.values))+(bar_width*2), rets.index.values, rotation='vertical')\n\n # ax.bar(X + range(len(rets.columns))+0.0, rets.loc[\"Beta [Systematic Risk]\", : ], color='r', alpha=0.5, width=bar_width)\n # ax.bar(X + range(len(rets.columns))+(bar_width*1), rets.loc[\"Treynor ratio\", : ], color='pink', alpha=0.5, width=bar_width)\n # ax.bar(X + range(len(rets.columns))+(bar_width*2), 
rets.loc[\"Appraisal ratio\", : ], color='deepskyblue', alpha=0.5, width=bar_width)\n # ax.bar(X + range(len(rets.columns))+(bar_width*3), rets.loc[\"Semideviation\", : ], color='lawngreen', alpha=0.5, width=bar_width)\n\n ax.bar(X + range(len(rets.index.values))+0.0, rets[\"Beta [Systematic Risk]\"], color='r', alpha=0.5, width=bar_width)\n ax.bar(X + range(len(rets.index.values))+(bar_width*1), rets[\"Treynor ratio\"], color='pink', alpha=0.5, width=bar_width)\n ax.bar(X + range(len(rets.index.values))+(bar_width*2), rets[\"Appraisal ratio\"], color='deepskyblue', alpha=0.5, width=bar_width)\n ax.bar(X + range(len(rets.index.values))+(bar_width*3), rets[\"Calmar ratio\"], color='peru', alpha=0.5, width=bar_width)\n ax.bar(X + range(len(rets.index.values))+(bar_width*4), rets[\"Information ratio\"], color='black', alpha=0.5, width=bar_width)\n\n ax.bar(X + range(len(rets.index.values))+(bar_width*1), rets[\"Idiosyncratic volatility [Unsystematic Risk]\"], color='g', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*2), rets[\"Annualised return\"], color='b', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*4), rets.loc[\"Jensen's alpha\", : ], color='aqua', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*5), rets.loc[\"Tracking error\", : ], color='grey', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*7), rets[\"Annualised return\"], color='orange', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*8), rets.loc[\"Expected return\", : ], color='lawngreen', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*9), rets[\"Annualised risk\"], color='peru', alpha=0.5, width=bar_width)\n #ax.bar(X + range(len(rets.index.values))+(bar_width*10), rets.loc[\"Information ratio\", : ], color='black', alpha=0.5, width=bar_width)\n\n plt.legend(labels=[\"Beta [Systematic Risk]\", \"Treynor ratio\", \"Appraisal ratio\", \"Calmar ratio\", \"Information ratio\", \"Idiosyncratic volatility [Unsystematic Risk]\"])\n plt.grid(axis = 'y')\n plt.show()\n plt.savefig(data_directory + title + \".svg\")\n\n\n\ntitle = \"Stock vs Systematic and Unsystematic Risks\"\nrets = too_good\n\ndisplay_multibar_graph(rets, title)\n\n\n# %%\ndef graph_with_two_y_axes(rets, x_param, y_param1, y_param2, title, color1, color2):\n #create basic bar plot\n fig, ax = plt.subplots()\n ax.bar(range(len(rets.columns)), rets.loc[y_param1, : ], color=color1)\n fig.set_figheight(20)\n fig.set_figwidth(30)\n\n # Set the ticks and ticklabels for all axes\n plt.setp(ax, xticks=range(len(x_param)), xticklabels= x_param, )\n ax.set_title('Stock')\n ax.set_xticks(range(len(x_param)), )\n ax.set_xticklabels(x_param)\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.tick_params(axis='x', colors='lightblue')\n ax.set_ylabel(y_param1)\n #specify axis colors\n ax.tick_params(axis='y', colors=color1)\n ax.yaxis.label.set_color(color1)\n\n ax2 = ax.twinx()\n ax2.plot(range(len(x_param)), rets.loc[y_param2, :], color=color2, marker=\"D\", ms=line_size)\n ax2.set_ylabel(y_param2)\n ax2.yaxis.label.set_color(color2)\n ax2.tick_params(axis='y', colors=color2)\n\n #display Pareto chart\n fig.legend(labels=[y_param1, y_param2, \"Annualised return\"], loc='upper center')\n plt.show()\n plt.savefig(data_directory + title + \".svg\")\n\n\nrets = too_good.T\n\ncolor1 = 'pink'\ncolor2 = 'green'\nline_size = 6\ntitle = \"Stock vs Beta [Systematic 
Risk] and Idiosyncratic volatility [Unsystematic Risk]\"\n\ngraph_with_two_y_axes(rets, rets.columns, \"Beta [Systematic Risk]\", \"Idiosyncratic volatility [Unsystematic Risk]\", title, color1, color2 )\n\n\n# %%\ndef scatter_graph_with_annotation(rets, x_param, y_param, title):\n area = np.pi*50.0\n # sns.set(style='darkgrid')\n plt.figure(figsize=(24,16))\n plt.scatter(rets[x_param], rets[y_param], s=area)\n plt.xlabel(x_param, fontsize=15)\n plt.ylabel(y_param, fontsize=15)\n plt.title(title, fontsize=20)\n\n for label, x, y in zip(rets.index, rets[x_param], rets[y_param]) : \n plt.annotate(label, xy=(x,y), xytext=(50, 0), \n textcoords='offset points', ha='center', va='bottom',color='blue',\n bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.95', \n color='r'))\n\n plt.savefig(data_directory + title + \".svg\")\n\n\n\nrets = too_good#.dropna()\ntitle = \"Return vs. Risk for Stocks [Non Normalised]\"\n\nscatter_graph_with_annotation(rets, \"Annualised return\", \"Annualised risk\", title)\n\n\n# %%\n\n\ndef display_stacked_bar_graph(rets, title):\n fig=plt.figure(figsize=(12,8))\n ax=fig.add_axes([0,0,1,1])\n\n plt.xlabel(\"Scrip\", fontsize=15)\n plt.ylabel(\"Calmar Ratio\", fontsize=15)\n plt.title(title, fontsize=20)\n plt.xticks(range(len(rets.index)), rets.index, rotation='vertical')\n\n #############################################################\n # Data has been clipped for Outlier Stocks #\n #############################################################\n #ax.bar(range(len(rets.columns)), np.clip(rets[\"Calmar Ratio\"], -0.5, 1.5), color='r', alpha=0.5)\n #ax.bar(range(len(rets.index)), rets.loc[\"Calmar Ratio\"], color='r', alpha=0.5)\n #ax.bar(range(len(rets.columns)), np.clip(rets[\"Sortino Ratio\"], -0.5, 1.5), color='g', alpha=0.5)\n ax.bar(range(len(rets[\"Sortino ratio\"])), rets[\"Sortino ratio\"], color='g', alpha=0.5)\n ax.bar(range(len(rets[\"Calmar ratio\"])), rets[\"Calmar ratio\"], color='r', alpha=0.5)\n\n plt.legend(labels=[\"Calmar ratio\", \"Sortino ratio\"])\n plt.savefig(data_directory + title + \".svg\")\n\n\ntitle = \"Scrip vs Calmar Ratio Sortino Ratio\"\nrets = too_good[[\"Sortino ratio\", \"Calmar ratio\"]] \nrets[\"Sortino ratio\"]\ndisplay_stacked_bar_graph(rets, title)\n\n\n# %%\ndef cumulative_returns(returns):\n \n return returns\n\ndef downside_risk(returns, risk_free=0):\n adj_returns = returns - risk_free\n sqr_downside = np.square(np.clip(adj_returns, np.NINF, 0))\n return np.sqrt(np.nanmean(sqr_downside) * 252)\n\ndef sortino(returns, risk_free=0):\n adj_returns = returns - risk_free\n drisk = downside_risk(adj_returns)\n if drisk == 0:\n return np.nan\n return (np.nanmean(adj_returns) * np.sqrt(252)) / drisk\n\ndef annual_returns(returns):\n num_years = len(returns) / 252\n cum_ret_final = (returns + 1).prod().squeeze()\n return cum_ret_final ** (1 / num_years) - 1\n\ndef calmar(returns):\n max_dd = max_drawdown(cumulative_returns(returns))\n if max_dd < 0:\n return annual_returns(returns) / abs(max_dd)\n return np.nan\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.minimum", "numpy.sqrt", "pandas.Series", "numpy.nan_to_num", "pandas.DataFrame", "numpy.concatenate", "numpy.add", "numpy.nanmean", "numpy.where", "numpy.divide", "numpy.hstack", "matplotlib.pyplot.gca", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "numpy.clip", "numpy.subtract", "matplotlib.pyplot.subplots_adjust", "pandas.set_option", "numpy.zeros", "matplotlib.pyplot.figure", "pandas.concat", "matplotlib.pyplot.title", "numpy.multiply", "numpy.isnan", "matplotlib.pyplot.savefig", "numpy.timedelta64", "pandas.DataFrame.from_dict", "numpy.random.RandomState", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.subplots", "numpy.datetime64", "matplotlib.pyplot.colorbar", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.grid", "numpy.prod", "matplotlib.pyplot.xlabel", "numpy.vstack" ] ]
moyiming1/Retrosynthesis-pathway-ranking
[ "380f31189d09395d0de911759b8bcea436b559b2" ]
[ "tree_lstm/train_valid_test.py" ]
[ "import os, sys\n\nproject_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(project_path)\n\nimport torch\nimport math\n\nfrom tree_lstm.treeLSTM_model import topk_accuracy\nfrom utils.scheduler import NoamLR, SinexpLR\nimport time\n\n\ndef train(model,\n loss_fn,\n optimizer,\n data_loader,\n scheduler,\n num_data,\n batch_size,\n device,\n n_iter,\n epoch,\n log_frequency):\n t0 = time.time()\n total_step = math.ceil(num_data // batch_size)\n loss_sum = 0\n metric_sum = [0] * 5\n\n step_count = 0\n for i, batch in enumerate(data_loader):\n pfp = batch['pfp'].to(device)\n rxnfp = batch['rxnfp'].to(device)\n adjacency_list = batch['adjacency_list'].to(device)\n node_order = batch['node_order'].to(device)\n edge_order = batch['edge_order'].to(device)\n num_nodes = batch['num_nodes']\n num_trees = batch['num_trees']\n batch_size = batch['batch_size']\n # print('converting to device: ', time.time()-t00, ' s')\n # Forward pass\n scores = model(pfp, rxnfp, adjacency_list, node_order, edge_order, num_nodes)\n loss = loss_fn(scores, num_trees, device=device)\n\n acc = topk_accuracy(scores, num_trees, topk=(1, 5, 10, 30, 100))\n # print('model forward: ', time.time() - t00, ' s')\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # print('optimizing: ', time.time() - t00, ' s')\n if isinstance(scheduler, NoamLR) or isinstance(scheduler, SinexpLR):\n scheduler.step()\n\n # calculate status\n loss_sum += loss.item()\n metric_sum = [x + y for x, y in zip(metric_sum, acc)]\n step_count += 1\n\n n_iter += batch_size\n\n # print log\n if (i + 1) % log_frequency == 0:\n loss_avg = loss_sum / step_count\n metric_mean = [x / step_count for x in metric_sum]\n lr = scheduler.get_lr()[0]\n\n loss_sum = 0\n metric_sum = [0] * 5\n step_count = 0\n\n t = time.time()\n # print(scores)\n print('Training: Epoch {}, step {}/{}, time {:.2f} h/{:.2f} h, '\n 'Loss: {:.4f}. Acc: top1 {:.3f}, top5 {:.3f}, '\n 'top10 {:.3f}, top30 {:.3f}, top100 {:.3f}, lr {:.5f}'\n .format(epoch, i, total_step,\n (t - t0) / 3600, (t - t0) * total_step / (i + 1) / 3600,\n loss_avg, *metric_mean, lr))\n return n_iter\n\n\ndef valid(model,\n loss_fn,\n data_loader,\n num_data,\n batch_size,\n device,\n epoch,\n log_frequency):\n loss_sum = 0\n metric_sum = [0] * 5\n\n loss_avg = 0\n metric_mean = [0] * 5\n with torch.no_grad():\n for i, batch in enumerate(data_loader):\n pfp = batch['pfp'].to(device)\n rxnfp = batch['rxnfp'].to(device)\n adjacency_list = batch['adjacency_list'].to(device)\n node_order = batch['node_order'].to(device)\n edge_order = batch['edge_order'].to(device)\n num_nodes = batch['num_nodes']\n num_trees = batch['num_trees']\n\n # Forward pass\n scores = model(pfp, rxnfp, adjacency_list, node_order, edge_order, num_nodes)\n loss = loss_fn(scores, num_trees, device=device)\n acc = topk_accuracy(scores, num_trees, topk=(1, 5, 10, 30, 100))\n\n # calculate status\n loss_sum += loss.item()\n metric_sum = [x + y for x, y in zip(metric_sum, acc)]\n # print log\n if (i + 1) % log_frequency == 0:\n loss_avg = loss_sum / (i + 1)\n metric_mean = [x / (i + 1) for x in metric_sum]\n\n print('Validation: Epoch {}, step {}/{}, Loss: {:.4f}. 
Acc: top1 {:.3f}, top5 {:.3f}, '\n 'top10 {:.3f}, top30 {:.3f}, top100 {:.3f}'\n .format(epoch, i, math.ceil(num_data // batch_size), loss_avg, *metric_mean))\n return loss_avg, metric_mean\n\n\ndef test(model,\n loss_fn,\n data_loader,\n batch_size,\n device,\n log_frequency,\n num_data):\n model.eval()\n topk = (1, 5, 10, 30, 50, 100)\n loss_sum = 0\n metric_sum = [0] * len(topk)\n\n loss_avg = 0\n metric_mean = [0] * len(topk)\n with torch.no_grad():\n for i, batch in enumerate(data_loader):\n pfp = batch['pfp'].to(device)\n rxnfp = batch['rxnfp'].to(device)\n adjacency_list = batch['adjacency_list'].to(device)\n node_order = batch['node_order'].to(device)\n edge_order = batch['edge_order'].to(device)\n num_nodes = batch['num_nodes']\n num_trees = batch['num_trees']\n # Forward pass\n scores = model(pfp, rxnfp, adjacency_list, node_order, edge_order, num_nodes)\n loss = loss_fn(scores, num_trees, device=device)\n acc = topk_accuracy(scores, num_trees, topk=topk)\n\n # calculate status\n loss_sum += loss.item()\n metric_sum = [x + y for x, y in zip(metric_sum, acc)]\n # print log\n if (i + 1) % log_frequency == 0:\n loss_avg = loss_sum / (i + 1)\n metric_mean = [x / (i + 1) for x in metric_sum]\n\n print('Test: step {}/{}, Loss: {:.4f}. Acc: top1 {:.3f}, top5 {:.3f}, '\n 'top10 {:.3f}, top30 {:.3f}, top50 {:.3f}, top100 {:.3f}'\n .format(i, math.ceil(num_data // batch_size), loss_avg, *metric_mean))\n return loss_avg, metric_mean\n" ]
[ [ "torch.no_grad" ] ]
keberhart/python-skyfield
[ "16fa511c86092aa88003b493d0f1b0708c0ec4de" ]
[ "skyfield/tests/test_topos.py" ]
[ "from assay import assert_raises\nfrom numpy import abs, arange, sqrt\n\nfrom skyfield import constants\nfrom skyfield.api import Distance, load, wgs84, wms\nfrom skyfield.functions import length_of\nfrom skyfield.positionlib import Apparent, Barycentric\nfrom skyfield.toposlib import ITRSPosition, iers2010\n\nangle = (-15, 15, 35, 45)\n\ndef ts():\n yield load.timescale()\n\ndef test_latitude_longitude_elevation_str_and_repr():\n w = wgs84.latlon(36.7138, -112.2169, 2400.0)\n assert str(w) == ('WGS84 latitude +36.7138 N'\n ' longitude -112.2169 E elevation 2400.0 m')\n assert repr(w) == ('<GeographicPosition WGS84 latitude +36.7138 N'\n ' longitude -112.2169 E elevation 2400.0 m>')\n\n w = wgs84.latlon([1.0, 2.0], [3.0, 4.0], [5.0, 6.0])\n assert str(w) == (\n 'WGS84 latitude [+1.0000 +2.0000] N'\n ' longitude [3.0000 4.0000] E'\n ' elevation [5.0 6.0] m'\n )\n assert repr(w) == '<GeographicPosition {0}>'.format(w)\n\n w = wgs84.latlon(arange(6.0), arange(10.0, 16.0), arange(20.0, 26.0))\n assert str(w) == (\n 'WGS84 latitude [+0.0000 +1.0000 ... +4.0000 +5.0000] N'\n ' longitude [10.0000 11.0000 ... 14.0000 15.0000] E'\n ' elevation [20.0 21.0 ... 24.0 25.0] m'\n )\n assert repr(w) == '<GeographicPosition {0}>'.format(w)\n\ndef test_raw_itrs_position():\n d = Distance(au=[1, 2, 3])\n p = ITRSPosition(d)\n ts = load.timescale()\n t = ts.utc(2020, 12, 16, 12, 59)\n p.at(t)\n\ndef test_wgs84_velocity_matches_actual_motion():\n # It looks like this is a sweet spot for accuracy: presumably a\n # short enough fraction of a second that the vector does not time to\n # change direction much, but long enough that the direction does not\n # get lost down in the noise.\n factor = 300.0\n\n ts = load.timescale()\n t = ts.utc(2019, 11, 2, 3, 53, [0, 1.0 / factor])\n jacob = wgs84.latlon(36.7138, -112.2169)\n p = jacob.at(t)\n velocity1 = p.position.km[:,1] - p.position.km[:,0]\n velocity2 = p.velocity.km_per_s[:,0]\n assert length_of(velocity2 - factor * velocity1) < 0.0007\n\ndef test_lst():\n ts = load.timescale()\n ts.delta_t_table = [-1e99, 1e99], [69.363285] * 2 # from finals2000A.all\n t = ts.utc(2020, 11, 27, 15, 34)\n top = wgs84.latlon(0.0, 0.0)\n expected = 20.0336663100 # see \"authorities/horizons-lst\"\n actual = top.lst_hours_at(t)\n difference_mas = (actual - expected) * 3600 * 15 * 1e3\n horizons_ra_offset_mas = 51.25\n difference_mas -= horizons_ra_offset_mas\n assert abs(difference_mas) < 1.0\n\ndef test_itrs_xyz_attribute_and_itrf_xyz_method():\n top = wgs84.latlon(45.0, 0.0, elevation_m=constants.AU_M - constants.ERAD)\n\n x, y, z = top.itrs_xyz.au\n assert abs(x - sqrt(0.5)) < 2e-7\n assert abs(y - 0.0) < 1e-14\n assert abs(z - sqrt(0.5)) < 2e-7\n\n ts = load.timescale()\n t = ts.utc(2019, 11, 2, 3, 53)\n x, y, z = top.at(t).itrf_xyz().au\n assert abs(x - sqrt(0.5)) < 1e-4\n assert abs(y - 0.0) < 1e-14\n assert abs(z - sqrt(0.5)) < 1e-4\n\ndef test_polar_motion_when_computing_topos_position(ts):\n xp_arcseconds = 11.0\n yp_arcseconds = 22.0\n ts.polar_motion_table = [0.0], [xp_arcseconds], [yp_arcseconds]\n\n top = iers2010.latlon(wms(42, 21, 24.1), wms(-71, 3, 24.8), 43.0)\n t = ts.utc(2005, 11, 12, 22, 2)\n\n # \"expected\" comes from:\n # from novas.compat import ter2cel\n # print(ter2cel(t.whole, t.ut1_fraction, t.delta_t, xp_arcseconds,\n # yp_arcseconds, top.itrs_xyz.km, method=1))\n\n expected = (3129.530248036487, -3535.1665884086683, 4273.94957733827)\n assert max(abs(top.at(t).position.km - expected)) < 3e-11\n\ndef 
test_polar_motion_when_computing_altaz_coordinates(ts):\n latitude = 37.3414\n longitude = -121.6429\n elevation = 1283.0\n ra_hours = 5.59\n dec_degrees = -5.45\n\n xp_arcseconds = 11.0\n yp_arcseconds = 22.0\n ts.polar_motion_table = [0.0], [xp_arcseconds], [yp_arcseconds]\n\n t = ts.utc(2020, 11, 12, 22, 16)\n top = wgs84.latlon(latitude, longitude, elevation)\n\n pos = Apparent.from_radec(ra_hours, dec_degrees, epoch=t)\n pos.t = t\n pos.center = top\n\n alt, az, distance = pos.altaz()\n\n # To generate the test altitude and azimuth below:\n # from novas.compat import equ2hor, make_on_surface\n # location = make_on_surface(latitude, longitude, elevation, 0, 0)\n # (novas_zd, novas_az), (rar, decr) = equ2hor(\n # t.ut1, t.delta_t, xp_arcseconds, yp_arcseconds, location,\n # ra_hours, dec_degrees, 0,\n # )\n # novas_alt = 90.0 - novas_zd\n # print(novas_alt, novas_az)\n\n novas_alt = -58.091983295564205\n novas_az = 1.8872567543791035\n\n assert abs(alt.degrees - novas_alt) < 1.9e-9\n assert abs(az.degrees - novas_az) < 1.3e-7\n\ndef test_subpoint_with_wrong_center(ts, angle):\n t = ts.utc(2020, 12, 31)\n p = Barycentric([0,0,0], t=t)\n with assert_raises(ValueError, 'a geographic subpoint can only be'\n ' calculated for positions measured from 399, the center'\n ' of the Earth, but this position has center 0'):\n wgs84.subpoint(p)\n\ndef test_iers2010_subpoint(ts, angle):\n t = ts.utc(2018, 1, 19, 14, 37, 55)\n # An elevation of 0 is more difficult for the routine's accuracy\n # than a very large elevation.\n top = iers2010.latlon(angle, angle, elevation_m=0.0)\n p = top.at(t)\n b = iers2010.subpoint(p)\n\n error_degrees = abs(b.latitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\n error_degrees = abs(b.longitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\ndef test_wgs84_subpoint(ts, angle):\n t = ts.utc(2018, 1, 19, 14, 37, 55)\n # An elevation of 0 is more difficult for the routine's accuracy\n # than a very large elevation.\n top = wgs84.latlon(angle, angle, elevation_m=0.0)\n p = top.at(t)\n b = wgs84.subpoint(p)\n\n error_degrees = abs(b.latitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\n error_degrees = abs(b.longitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\ndef test_wgs84_round_trip_with_polar_motion(ts, angle):\n t = ts.utc(2018, 1, 19, 14, 37, 55)\n ts.polar_motion_table = [0.0], [0.003483], [0.358609]\n\n top = wgs84.latlon(angle, angle, elevation_m=0.0)\n p = top.at(t)\n b = wgs84.subpoint(p)\n\n error_degrees = abs(b.latitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\n error_degrees = abs(b.longitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\ndef test_latlon_and_subpoint_methods(ts, angle):\n t = ts.utc(2020, 11, 3, 17, 5)\n g = wgs84.latlon(angle, 2 * angle, elevation_m=1234.0)\n pos = g.at(t)\n\n def check_lat(lat): assert abs(g.latitude.mas() - lat.mas()) < 0.1\n def check_lon(lon): assert abs(g.longitude.mas() - lon.mas()) < 0.1\n def check_height(h): assert abs(g.elevation.m - h.m) < 1e-7\n\n lat, lon = wgs84.latlon_of(pos)\n check_lat(lat)\n check_lon(lon)\n\n height = wgs84.height_of(pos)\n check_height(height)\n\n g = wgs84.geographic_position_of(pos)\n check_lat(g.latitude)\n check_lon(g.longitude)\n check_height(g.elevation)\n\n g = 
wgs84.subpoint(pos) # old deprecated method name\n check_lat(g.latitude)\n check_lon(g.longitude)\n check_height(g.elevation)\n\ndef test_deprecated_position_subpoint_method(ts, angle):\n t = ts.utc(2018, 1, 19, 14, 37, 55)\n top = iers2010.latlon(angle, angle, elevation_m=0.0)\n b = top.at(t).subpoint()\n\n error_degrees = abs(b.latitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n\n error_degrees = abs(b.longitude.degrees - angle)\n error_mas = 60.0 * 60.0 * 1000.0 * error_degrees\n assert error_mas < 0.1\n" ]
[ [ "numpy.arange", "numpy.sqrt", "numpy.abs" ] ]
hieubkvn123/CenterNet
[ "438c1e8d0424122ece353bb20e64ff51f9444b6f" ]
[ "src/lib/detectors/exdet.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport cv2\nimport numpy as np\nimport time\nimport torch\n\nfrom models.decode import exct_decode, agnex_ct_decode\nfrom models.utils import flip_tensor\nfrom utils.image import get_affine_transform, transform_preds\nfrom utils.post_process import ctdet_post_process\nfrom utils.debugger import Debugger\n\nfrom .base_detector import BaseDetector\n\nclass ExdetDetector(BaseDetector):\n def __init__(self, opt):\n super(ExdetDetector, self).__init__(opt)\n self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode\n\n def process(self, images, return_time=False):\n with torch.no_grad():\n torch.cuda.synchronize()\n output = self.model(images)[-1]\n t_heat = output['hm_t'].sigmoid_()\n l_heat = output['hm_l'].sigmoid_()\n b_heat = output['hm_b'].sigmoid_()\n r_heat = output['hm_r'].sigmoid_()\n c_heat = output['hm_c'].sigmoid_()\n torch.cuda.synchronize()\n forward_time = time.time()\n if self.opt.reg_offset:\n dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, \n output['reg_t'], output['reg_l'],\n output['reg_b'], output['reg_r'], \n K=self.opt.K,\n scores_thresh=self.opt.scores_thresh,\n center_thresh=self.opt.center_thresh,\n aggr_weight=self.opt.aggr_weight)\n else:\n dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, K=self.opt.K,\n scores_thresh=self.opt.scores_thresh,\n center_thresh=self.opt.center_thresh,\n aggr_weight=self.opt.aggr_weight)\n if return_time:\n return output, dets, forward_time\n else:\n return output, dets\n\n def debug(self, debugger, images, dets, output, scale=1):\n detection = dets.detach().cpu().numpy().copy()\n detection[:, :, :4] *= self.opt.down_ratio\n for i in range(1):\n inp_height, inp_width = images.shape[2], images.shape[3]\n pred_hm = np.zeros((inp_height, inp_width, 3), dtype=np.uint8)\n img = images[i].detach().cpu().numpy().transpose(1, 2, 0)\n img = ((img * self.std + self.mean) * 255).astype(np.uint8)\n parts = ['t', 'l', 'b', 'r', 'c']\n for p in parts:\n tag = 'hm_{}'.format(p)\n pred = debugger.gen_colormap(\n output[tag][i].detach().cpu().numpy(), (inp_height, inp_width))\n if p != 'c':\n pred_hm = np.maximum(pred_hm, pred)\n else:\n debugger.add_blend_img(\n img, pred, 'pred_{}_{:.1f}'.format(p, scale))\n debugger.add_blend_img(img, pred_hm, 'pred_{:.1f}'.format(scale))\n debugger.add_img(img, img_id='out_{:.1f}'.format(scale))\n for k in range(len(detection[i])):\n # print('detection', detection[i, k, 4], detection[i, k])\n if detection[i, k, 4] > 0.01:\n # print('detection', detection[i, k, 4], detection[i, k])\n debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],\n detection[i, k, 4], \n img_id='out_{:.1f}'.format(scale))\n\n def post_process(self, dets, meta, scale=1):\n out_width, out_height = meta['out_width'], meta['out_height']\n dets = dets.detach().cpu().numpy().reshape(2, -1, 14)\n dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]\n dets = dets.reshape(1, -1, 14)\n dets[0, :, 0:2] = transform_preds(\n dets[0, :, 0:2], meta['c'], meta['s'], (out_width, out_height))\n dets[0, :, 2:4] = transform_preds(\n dets[0, :, 2:4], meta['c'], meta['s'], (out_width, out_height))\n dets[:, :, 0:4] /= scale\n return dets[0]\n\n def merge_outputs(self, detections):\n detections = np.concatenate(\n [detection for detection in detections], axis=0).astype(np.float32)\n classes = detections[..., -1]\n keep_inds = (detections[:, 4] > 0)\n detections = detections[keep_inds]\n classes = 
classes[keep_inds]\n\n results = {}\n for j in range(self.num_classes):\n keep_inds = (classes == j)\n results[j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)\n soft_nms(results[j + 1], Nt=0.5, method=2)\n results[j + 1] = results[j + 1][:, 0:5]\n\n scores = np.hstack([\n results[j][:, -1] \n for j in range(1, self.num_classes + 1)\n ])\n if len(scores) > self.max_per_image:\n kth = len(scores) - self.max_per_image\n thresh = np.partition(scores, kth)[kth]\n for j in range(1, self.num_classes + 1):\n keep_inds = (results[j][:, -1] >= thresh)\n results[j] = results[j][keep_inds]\n return results\n\n\n def show_results(self, debugger, image, results):\n debugger.add_img(image, img_id='exdet')\n for j in range(1, self.num_classes + 1):\n for bbox in results[j]:\n if bbox[4] > self.opt.vis_thresh:\n debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='exdet')\n debugger.show_all_imgs(pause=self.pause)\n" ]
[ [ "numpy.partition", "torch.cuda.synchronize", "numpy.maximum", "numpy.concatenate", "torch.no_grad", "numpy.zeros" ] ]
ramonemiliani93/torchio
[ "b6ba3d168d5abb021dc19c4c71f7af72229b9cb8", "b6ba3d168d5abb021dc19c4c71f7af72229b9cb8" ]
[ "torchio/data/image.py", "tests/data/test_subject.py" ]
[ "import warnings\nfrom pathlib import Path\nfrom collections import Counter\nfrom collections.abc import Iterable\nfrom typing import Any, Dict, Tuple, Optional, Union, Sequence, List, Callable\n\nimport torch\nimport humanize\nimport numpy as np\nimport nibabel as nib\nimport SimpleITK as sitk\nfrom deprecated import deprecated\n\nfrom ..utils import get_stem, guess_external_viewer\nfrom ..typing import (\n TypePath,\n TypeData,\n TypeDataAffine,\n TypeTripletInt,\n TypeTripletFloat,\n TypeDirection3D,\n)\nfrom ..constants import DATA, TYPE, AFFINE, PATH, STEM, INTENSITY, LABEL\nfrom .io import (\n ensure_4d,\n read_image,\n write_image,\n nib_to_sitk,\n sitk_to_nib,\n check_uint_to_int,\n get_rotation_and_spacing_from_affine,\n get_sitk_metadata_from_ras_affine,\n read_shape,\n read_affine,\n)\n\n\nPROTECTED_KEYS = DATA, AFFINE, TYPE, PATH, STEM\nTypeBound = Tuple[float, float]\nTypeBounds = Tuple[TypeBound, TypeBound, TypeBound]\n\ndeprecation_message = (\n 'Setting the image data with the property setter is deprecated. Use the'\n ' set_data() method instead'\n)\n\n\nclass Image(dict):\n r\"\"\"TorchIO image.\n\n For information about medical image orientation, check out `NiBabel docs`_,\n the `3D Slicer wiki`_, `Graham Wideman's website`_, `FSL docs`_ or\n `SimpleITK docs`_.\n\n Args:\n path: Path to a file or sequence of paths to files that can be read by\n :mod:`SimpleITK` or :mod:`nibabel`, or to a directory containing\n DICOM files. If :attr:`tensor` is given, the data in\n :attr:`path` will not be read.\n If a sequence of paths is given, data\n will be concatenated on the channel dimension so spatial\n dimensions must match.\n type: Type of image, such as :attr:`torchio.INTENSITY` or\n :attr:`torchio.LABEL`. This will be used by the transforms to\n decide whether to apply an operation, or which interpolation to use\n when resampling. For example, `preprocessing`_ and `augmentation`_\n intensity transforms will only be applied to images with type\n :attr:`torchio.INTENSITY`. Spatial transforms will be applied to\n all types, and nearest neighbor interpolation is always used to\n resample images with type :attr:`torchio.LABEL`.\n The type :attr:`torchio.SAMPLING_MAP` may be used with instances of\n :class:`~torchio.data.sampler.weighted.WeightedSampler`.\n tensor: If :attr:`path` is not given, :attr:`tensor` must be a 4D\n :class:`torch.Tensor` or NumPy array with dimensions\n :math:`(C, W, H, D)`.\n affine: :math:`4 \\times 4` matrix to convert voxel coordinates to world\n coordinates. If ``None``, an identity matrix will be used. See the\n `NiBabel docs on coordinates`_ for more information.\n check_nans: If ``True``, issues a warning if NaNs are found\n in the image. If ``False``, images will not be checked for the\n presence of NaNs.\n reader: Callable object that takes a path and returns a 4D tensor and a\n 2D, :math:`4 \\times 4` affine matrix. This can be used if your data\n is saved in a custom format, such as ``.npy`` (see example below).\n If the affine matrix is ``None``, an identity matrix will be used.\n **kwargs: Items that will be added to the image dictionary, e.g.\n acquisition parameters.\n\n TorchIO images are `lazy loaders`_, i.e. 
the data is only loaded from disk\n when needed.\n\n Example:\n >>> import torchio as tio\n >>> import numpy as np\n >>> image = tio.ScalarImage('t1.nii.gz') # subclass of Image\n >>> image # not loaded yet\n ScalarImage(path: t1.nii.gz; type: intensity)\n >>> times_two = 2 * image.data # data is loaded and cached here\n >>> image\n ScalarImage(shape: (1, 256, 256, 176); spacing: (1.00, 1.00, 1.00); orientation: PIR+; memory: 44.0 MiB; type: intensity)\n >>> image.save('doubled_image.nii.gz')\n >>> numpy_reader = lambda path: np.load(path), np.eye(4)\n >>> image = tio.ScalarImage('t1.npy', reader=numpy_reader)\n\n .. _lazy loaders: https://en.wikipedia.org/wiki/Lazy_loading\n .. _preprocessing: https://torchio.readthedocs.io/transforms/preprocessing.html#intensity\n .. _augmentation: https://torchio.readthedocs.io/transforms/augmentation.html#intensity\n .. _NiBabel docs: https://nipy.org/nibabel/image_orientation.html\n .. _NiBabel docs on coordinates: https://nipy.org/nibabel/coordinate_systems.html#the-affine-matrix-as-a-transformation-between-spaces\n .. _3D Slicer wiki: https://www.slicer.org/wiki/Coordinate_systems\n .. _FSL docs: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained\n .. _SimpleITK docs: https://simpleitk.readthedocs.io/en/master/fundamentalConcepts.html\n .. _Graham Wideman's website: http://www.grahamwideman.com/gw/brain/orientation/orientterms.htm\n \"\"\" # noqa: E501\n def __init__(\n self,\n path: Union[TypePath, Sequence[TypePath], None] = None,\n type: str = None, # noqa: A002\n tensor: Optional[TypeData] = None,\n affine: Optional[TypeData] = None,\n check_nans: bool = False, # removed by ITK by default\n reader: Callable = read_image,\n **kwargs: Dict[str, Any],\n ):\n self.check_nans = check_nans\n self.reader = reader\n\n if type is None:\n warnings.warn(\n 'Not specifying the image type is deprecated and will be'\n ' mandatory in the future. You can probably use'\n ' tio.ScalarImage or tio.LabelMap instead'\n )\n type = INTENSITY # noqa: A001\n\n if path is None and tensor is None:\n raise ValueError('A value for path or tensor must be given')\n self._loaded = False\n\n tensor = self._parse_tensor(tensor)\n affine = self._parse_affine(affine)\n if tensor is not None:\n self.set_data(tensor)\n self.affine = affine\n self._loaded = True\n for key in PROTECTED_KEYS:\n if key in kwargs:\n message = f'Key \"{key}\" is reserved. 
Use a different one'\n raise ValueError(message)\n if 'channels_last' in kwargs:\n message = (\n 'The \"channels_last\" keyword argument is deprecated after'\n ' https://github.com/fepegar/torchio/pull/685 and will be'\n ' removed in the future'\n )\n warnings.warn(message, DeprecationWarning)\n\n super().__init__(**kwargs)\n self.path = self._parse_path(path)\n\n self[PATH] = '' if self.path is None else str(self.path)\n self[STEM] = '' if self.path is None else get_stem(self.path)\n self[TYPE] = type\n\n def __repr__(self):\n properties = []\n properties.extend([\n f'shape: {self.shape}',\n f'spacing: {self.get_spacing_string()}',\n f'orientation: {\"\".join(self.orientation)}+',\n ])\n if self._loaded:\n properties.append(f'dtype: {self.data.type()}')\n natural = humanize.naturalsize(self.memory, binary=True)\n properties.append(f'memory: {natural}')\n else:\n properties.append(f'path: \"{self.path}\"')\n\n properties = '; '.join(properties)\n string = f'{self.__class__.__name__}({properties})'\n return string\n\n def __getitem__(self, item):\n if item in (DATA, AFFINE):\n if item not in self:\n self.load()\n return super().__getitem__(item)\n\n def __array__(self):\n return self.data.numpy()\n\n def __copy__(self):\n kwargs = {\n 'tensor': self.data,\n 'affine': self.affine,\n 'type': self.type,\n 'path': self.path,\n }\n for key, value in self.items():\n if key in PROTECTED_KEYS:\n continue\n kwargs[key] = value # should I copy? deepcopy?\n return self.__class__(**kwargs)\n\n @property\n def data(self) -> torch.Tensor:\n \"\"\"Tensor data. Same as :class:`Image.tensor`.\"\"\"\n return self[DATA]\n\n @data.setter # type: ignore\n @deprecated(version='0.18.16', reason=deprecation_message)\n def data(self, tensor: TypeData):\n self.set_data(tensor)\n\n def set_data(self, tensor: TypeData):\n \"\"\"Store a 4D tensor in the :attr:`data` key and attribute.\n\n Args:\n tensor: 4D tensor with dimensions :math:`(C, W, H, D)`.\n \"\"\"\n self[DATA] = self._parse_tensor(tensor, none_ok=False)\n\n @property\n def tensor(self) -> torch.Tensor:\n \"\"\"Tensor data. Same as :class:`Image.data`.\"\"\"\n return self.data\n\n @property\n def affine(self) -> np.ndarray:\n \"\"\"Affine matrix to transform voxel indices into world coordinates.\"\"\"\n # If path is a dir (probably DICOM), just load the data\n # Same if it's a list of paths (used to create a 4D image)\n is_dir = isinstance(self.path, Path) and self.path.is_dir()\n if self._loaded or is_dir:\n affine = self[AFFINE]\n else:\n affine = read_affine(self.path)\n return affine\n\n @affine.setter\n def affine(self, matrix):\n self[AFFINE] = self._parse_affine(matrix)\n\n @property\n def type(self) -> str: # noqa: A003\n return self[TYPE]\n\n @property\n def shape(self) -> Tuple[int, int, int, int]:\n \"\"\"Tensor shape as :math:`(C, W, H, D)`.\"\"\"\n custom_reader = self.reader is not read_image\n multipath = not isinstance(self.path, (str, Path))\n if self._loaded or custom_reader or multipath or self.path.is_dir():\n shape = tuple(self.data.shape)\n else:\n shape = read_shape(self.path)\n return shape\n\n @property\n def spatial_shape(self) -> TypeTripletInt:\n \"\"\"Tensor spatial shape as :math:`(W, H, D)`.\"\"\"\n return self.shape[1:]\n\n def check_is_2d(self) -> None:\n if not self.is_2d():\n message = f'Image is not 2D. 
Spatial shape: {self.spatial_shape}'\n raise RuntimeError(message)\n\n @property\n def height(self) -> int:\n \"\"\"Image height, if 2D.\"\"\"\n self.check_is_2d()\n return self.spatial_shape[1]\n\n @property\n def width(self) -> int:\n \"\"\"Image width, if 2D.\"\"\"\n self.check_is_2d()\n return self.spatial_shape[0]\n\n @property\n def orientation(self) -> Tuple[str, str, str]:\n \"\"\"Orientation codes.\"\"\"\n return nib.aff2axcodes(self.affine)\n\n @property\n def direction(self) -> TypeDirection3D:\n _, _, direction = get_sitk_metadata_from_ras_affine(\n self.affine, lps=False)\n return direction\n\n @property\n def spacing(self) -> Tuple[float, float, float]:\n \"\"\"Voxel spacing in mm.\"\"\"\n _, spacing = get_rotation_and_spacing_from_affine(self.affine)\n return tuple(spacing)\n\n @property\n def origin(self) -> Tuple[float, float, float]:\n \"\"\"Center of first voxel in array, in mm.\"\"\"\n return tuple(self.affine[:3, 3])\n\n @property\n def itemsize(self):\n \"\"\"Element size of the data type.\"\"\"\n return self.data.element_size()\n\n @property\n def memory(self) -> float:\n \"\"\"Number of Bytes that the tensor takes in the RAM.\"\"\"\n return np.prod(self.shape) * self.itemsize\n\n @property\n def bounds(self) -> np.ndarray:\n \"\"\"Position of centers of voxels in smallest and largest indices.\"\"\"\n ini = 0, 0, 0\n fin = np.array(self.spatial_shape) - 1\n point_ini = nib.affines.apply_affine(self.affine, ini)\n point_fin = nib.affines.apply_affine(self.affine, fin)\n return np.array((point_ini, point_fin))\n\n @property\n def num_channels(self) -> int:\n \"\"\"Get the number of channels in the associated 4D tensor.\"\"\"\n return len(self.data)\n\n def axis_name_to_index(self, axis: str) -> int:\n \"\"\"Convert an axis name to an axis index.\n\n Args:\n axis: Possible inputs are ``'Left'``, ``'Right'``, ``'Anterior'``,\n ``'Posterior'``, ``'Inferior'``, ``'Superior'``. Lower-case\n versions and first letters are also valid, as only the first\n letter will be used.\n\n .. note:: If you are working with animals, you should probably use\n ``'Superior'``, ``'Inferior'``, ``'Anterior'`` and ``'Posterior'``\n for ``'Dorsal'``, ``'Ventral'``, ``'Rostral'`` and ``'Caudal'``,\n respectively.\n\n .. note:: If your images are 2D, you can use ``'Top'``, ``'Bottom'``,\n ``'Left'`` and ``'Right'``.\n \"\"\"\n # Top and bottom are used for the vertical 2D axis as the use of\n # Height vs Horizontal might be ambiguous\n\n if not isinstance(axis, str):\n raise ValueError('Axis must be a string')\n axis = axis[0].upper()\n\n # Generally, TorchIO tensors are (C, W, H, D)\n if axis in 'TB': # Top, Bottom\n return -2\n else:\n try:\n index = self.orientation.index(axis)\n except ValueError:\n index = self.orientation.index(self.flip_axis(axis))\n # Return negative indices so that it does not matter whether we\n # refer to spatial dimensions or not\n index = -3 + index\n return index\n\n @staticmethod\n def flip_axis(axis: str) -> str:\n \"\"\"Return the opposite axis label. For example, ``'L'`` -> ``'R'``.\n\n Args:\n axis: Axis label, such as ``'L'`` or ``'left'``.\n \"\"\"\n labels = 'LRPAISTBDV'\n first = labels[::2]\n last = labels[1::2]\n flip_dict = {a: b for a, b in zip(first + last, last + first)}\n axis = axis[0].upper()\n flipped_axis = flip_dict.get(axis)\n if flipped_axis is None:\n values = ', '.join(labels)\n message = f'Axis not understood. 
Please use one of: {values}'\n raise ValueError(message)\n return flipped_axis\n\n def get_spacing_string(self) -> str:\n strings = [f'{n:.2f}' for n in self.spacing]\n string = f'({\", \".join(strings)})'\n return string\n\n def get_bounds(self) -> TypeBounds:\n \"\"\"Get minimum and maximum world coordinates occupied by the image.\"\"\"\n first_index = 3 * (-0.5,)\n last_index = np.array(self.spatial_shape) - 0.5\n first_point = nib.affines.apply_affine(self.affine, first_index)\n last_point = nib.affines.apply_affine(self.affine, last_index)\n array = np.array((first_point, last_point))\n bounds_x, bounds_y, bounds_z = array.T.tolist()\n return bounds_x, bounds_y, bounds_z\n\n @staticmethod\n def _parse_single_path(\n path: TypePath\n ) -> Path:\n try:\n path = Path(path).expanduser()\n except TypeError:\n message = (\n f'Expected type str or Path but found {path} with type'\n f' {type(path)} instead'\n )\n raise TypeError(message)\n except RuntimeError:\n message = (\n f'Conversion to path not possible for variable: {path}'\n )\n raise RuntimeError(message)\n\n if not (path.is_file() or path.is_dir()): # might be a dir with DICOM\n raise FileNotFoundError(f'File not found: \"{path}\"')\n return path\n\n def _parse_path(\n self,\n path: Union[TypePath, Sequence[TypePath], None]\n ) -> Optional[Union[Path, List[Path]]]:\n if path is None:\n return None\n if isinstance(path, Iterable) and not isinstance(path, str):\n return [self._parse_single_path(p) for p in path]\n else:\n return self._parse_single_path(path)\n\n def _parse_tensor(\n self,\n tensor: Optional[TypeData],\n none_ok: bool = True,\n ) -> Optional[torch.Tensor]:\n if tensor is None:\n if none_ok:\n return None\n else:\n raise RuntimeError('Input tensor cannot be None')\n if isinstance(tensor, np.ndarray):\n tensor = check_uint_to_int(tensor)\n tensor = torch.as_tensor(tensor)\n elif not isinstance(tensor, torch.Tensor):\n message = (\n 'Input tensor must be a PyTorch tensor or NumPy array,'\n f' but type \"{type(tensor)}\" was found'\n )\n raise TypeError(message)\n ndim = tensor.ndim\n if ndim != 4:\n raise ValueError(f'Input tensor must be 4D, but it is {ndim}D')\n if tensor.dtype == torch.bool:\n tensor = tensor.to(torch.uint8)\n if self.check_nans and torch.isnan(tensor).any():\n warnings.warn('NaNs found in tensor', RuntimeWarning)\n return tensor\n\n @staticmethod\n def _parse_tensor_shape(tensor: torch.Tensor) -> TypeData:\n return ensure_4d(tensor)\n\n @staticmethod\n def _parse_affine(affine: Optional[TypeData]) -> np.ndarray:\n if affine is None:\n return np.eye(4)\n if isinstance(affine, torch.Tensor):\n affine = affine.numpy()\n if not isinstance(affine, np.ndarray):\n bad_type = type(affine)\n raise TypeError(f'Affine must be a NumPy array, not {bad_type}')\n if affine.shape != (4, 4):\n bad_shape = affine.shape\n raise ValueError(f'Affine shape must be (4, 4), not {bad_shape}')\n return affine.astype(np.float64)\n\n def load(self) -> None:\n r\"\"\"Load the image from disk.\n\n Returns:\n Tuple containing a 4D tensor of size :math:`(C, W, H, D)` and a 2D\n :math:`4 \\times 4` affine matrix to convert voxel indices to world\n coordinates.\n \"\"\"\n if self._loaded:\n return\n paths = self.path if isinstance(self.path, list) else [self.path]\n tensor, affine = self.read_and_check(paths[0])\n tensors = [tensor]\n for path in paths[1:]:\n new_tensor, new_affine = self.read_and_check(path)\n if not np.array_equal(affine, new_affine):\n message = (\n 'Files have different affine matrices.'\n f'\\nMatrix of 
{paths[0]}:'\n f'\\n{affine}'\n f'\\nMatrix of {path}:'\n f'\\n{new_affine}'\n )\n warnings.warn(message, RuntimeWarning)\n if not tensor.shape[1:] == new_tensor.shape[1:]:\n message = (\n f'Files shape do not match, found {tensor.shape}'\n f'and {new_tensor.shape}'\n )\n RuntimeError(message)\n tensors.append(new_tensor)\n tensor = torch.cat(tensors)\n self.set_data(tensor)\n self.affine = affine\n self._loaded = True\n\n def read_and_check(self, path: TypePath) -> TypeDataAffine:\n tensor, affine = self.reader(path)\n tensor = self._parse_tensor_shape(tensor)\n tensor = self._parse_tensor(tensor)\n affine = self._parse_affine(affine)\n if self.check_nans and torch.isnan(tensor).any():\n warnings.warn(f'NaNs found in file \"{path}\"', RuntimeWarning)\n return tensor, affine\n\n def save(self, path: TypePath, squeeze: Optional[bool] = None) -> None:\n \"\"\"Save image to disk.\n\n Args:\n path: String or instance of :class:`pathlib.Path`.\n squeeze: Whether to remove singleton dimensions before saving.\n If ``None``, the array will be squeezed if the output format is\n JP(E)G, PNG, BMP or TIF(F).\n \"\"\"\n write_image(\n self.data,\n self.affine,\n path,\n squeeze=squeeze,\n )\n\n def is_2d(self) -> bool:\n return self.shape[-1] == 1\n\n def numpy(self) -> np.ndarray:\n \"\"\"Get a NumPy array containing the image data.\"\"\"\n return np.asarray(self)\n\n def as_sitk(self, **kwargs) -> sitk.Image:\n \"\"\"Get the image as an instance of :class:`sitk.Image`.\"\"\"\n return nib_to_sitk(self.data, self.affine, **kwargs)\n\n @classmethod\n def from_sitk(cls, sitk_image):\n \"\"\"Instantiate a new TorchIO image from a :class:`sitk.Image`.\n\n Example:\n >>> import torchio as tio\n >>> import SimpleITK as sitk\n >>> sitk_image = sitk.Image(20, 30, 40, sitk.sitkUInt16)\n >>> tio.LabelMap.from_sitk(sitk_image)\n LabelMap(shape: (1, 20, 30, 40); spacing: (1.00, 1.00, 1.00); orientation: LPS+; memory: 93.8 KiB; dtype: torch.IntTensor)\n >>> sitk_image = sitk.Image((224, 224), sitk.sitkVectorFloat32, 3)\n >>> tio.ScalarImage.from_sitk(sitk_image)\n ScalarImage(shape: (3, 224, 224, 1); spacing: (1.00, 1.00, 1.00); orientation: LPS+; memory: 588.0 KiB; dtype: torch.FloatTensor)\n \"\"\" # noqa: E501\n tensor, affine = sitk_to_nib(sitk_image)\n return cls(tensor=tensor, affine=affine)\n\n def as_pil(self, transpose=True):\n \"\"\"Get the image as an instance of :class:`PIL.Image`.\n\n .. note:: Values will be clamped to 0-255 and cast to uint8.\n .. 
note:: To use this method, `Pillow` needs to be installed:\n `pip install Pillow`.\n \"\"\"\n try:\n from PIL import Image as ImagePIL\n except ModuleNotFoundError as e:\n message = (\n 'Please install Pillow to use Image.as_pil():'\n ' pip install Pillow'\n )\n raise RuntimeError(message) from e\n\n self.check_is_2d()\n tensor = self.data\n if len(tensor) == 1:\n tensor = torch.cat(3 * [tensor])\n if len(tensor) != 3:\n raise RuntimeError('The image must have 1 or 3 channels')\n if transpose:\n tensor = tensor.permute(3, 2, 1, 0)\n else:\n tensor = tensor.permute(3, 1, 2, 0)\n array = tensor.clamp(0, 255).numpy()[0]\n return ImagePIL.fromarray(array.astype(np.uint8))\n\n def to_gif(\n self,\n axis: int,\n duration: float, # of full gif\n output_path: TypePath,\n loop: int = 0,\n rescale: bool = True,\n optimize: bool = True,\n reverse: bool = False,\n ) -> None:\n \"\"\"Save an animated GIF of the image.\n\n Args:\n axis: Spatial axis (0, 1 or 2).\n duration: Duration of the full animation in seconds.\n output_path: Path to the output GIF file.\n loop: Number of times the GIF should loop.\n ``0`` means that it will loop forever.\n rescale: Use :class:`~torchio.transforms.preprocessing.intensity.rescale.RescaleIntensity`\n to rescale the intensity values to :math:`[0, 255]`.\n optimize: If ``True``, attempt to compress the palette by\n eliminating unused colors. This is only useful if the palette\n can be compressed to the next smaller power of 2 elements.\n reverse: Reverse the temporal order of frames.\n \"\"\" # noqa: E501\n from ..visualization import make_gif # avoid circular import\n make_gif(\n self.data,\n axis,\n duration,\n output_path,\n loop=loop,\n rescale=rescale,\n optimize=optimize,\n reverse=reverse,\n )\n\n def get_center(self, lps: bool = False) -> TypeTripletFloat:\n \"\"\"Get image center in RAS+ or LPS+ coordinates.\n\n Args:\n lps: If ``True``, the coordinates will be in LPS+ orientation, i.e.\n the first dimension grows towards the left, etc. Otherwise, the\n coordinates will be in RAS+ orientation.\n \"\"\"\n size = np.array(self.spatial_shape)\n center_index = (size - 1) / 2\n r, a, s = nib.affines.apply_affine(self.affine, center_index)\n if lps:\n return (-r, -a, s)\n else:\n return (r, a, s)\n\n def set_check_nans(self, check_nans: bool) -> None:\n self.check_nans = check_nans\n\n def plot(self, **kwargs) -> None:\n \"\"\"Plot image.\"\"\"\n if self.is_2d():\n self.as_pil().show()\n else:\n from ..visualization import plot_volume # avoid circular import\n plot_volume(self, **kwargs)\n\n def show(self, viewer_path: Optional[TypePath] = None) -> None:\n \"\"\"Open the image using external software.\n\n Args:\n viewer_path: Path to the application used to view the image. If\n ``None``, the value of the environment variable\n ``SITK_SHOW_COMMAND`` will be used. 
If this variable is also\n not set, TorchIO will try to guess the location of\n `ITK-SNAP <http://www.itksnap.org/pmwiki/pmwiki.php>`_ and\n `3D Slicer <https://www.slicer.org/>`_.\n\n Raises:\n RuntimeError: If the viewer is not found.\n \"\"\"\n sitk_image = self.as_sitk()\n image_viewer = sitk.ImageViewer()\n # This is so that 3D Slicer creates segmentation nodes from label maps\n if self.__class__.__name__ == 'LabelMap':\n image_viewer.SetFileExtension('.seg.nrrd')\n if viewer_path is not None:\n image_viewer.SetApplication(str(viewer_path))\n try:\n image_viewer.Execute(sitk_image)\n except RuntimeError as e:\n viewer_path = guess_external_viewer()\n if viewer_path is None:\n message = (\n 'No external viewer has been found. Please set the'\n ' environment variable SITK_SHOW_COMMAND to a viewer of'\n ' your choice'\n )\n raise RuntimeError(message) from e\n image_viewer.SetApplication(str(viewer_path))\n image_viewer.Execute(sitk_image)\n\n\nclass ScalarImage(Image):\n \"\"\"Image whose pixel values represent scalars.\n\n Example:\n >>> import torch\n >>> import torchio as tio\n >>> # Loading from a file\n >>> t1_image = tio.ScalarImage('t1.nii.gz')\n >>> dmri = tio.ScalarImage(tensor=torch.rand(32, 128, 128, 88))\n >>> image = tio.ScalarImage('safe_image.nrrd', check_nans=False)\n >>> data, affine = image.data, image.affine\n >>> affine.shape\n (4, 4)\n >>> image.data is image[tio.DATA]\n True\n >>> image.data is image.tensor\n True\n >>> type(image.data)\n torch.Tensor\n\n See :class:`~torchio.Image` for more information.\n \"\"\"\n def __init__(self, *args, **kwargs):\n if 'type' in kwargs and kwargs['type'] != INTENSITY:\n raise ValueError('Type of ScalarImage is always torchio.INTENSITY')\n kwargs.update({'type': INTENSITY})\n super().__init__(*args, **kwargs)\n\n def hist(self, **kwargs) -> None:\n \"\"\"Plot histogram.\"\"\"\n from ..visualization import plot_histogram\n x = self.data.flatten().numpy()\n plot_histogram(x, **kwargs)\n\n\nclass LabelMap(Image):\n \"\"\"Image whose pixel values represent categorical labels.\n\n Example:\n >>> import torch\n >>> import torchio as tio\n >>> labels = tio.LabelMap(tensor=torch.rand(1, 128, 128, 68) > 0.5)\n >>> labels = tio.LabelMap('t1_seg.nii.gz') # loading from a file\n >>> tpm = tio.LabelMap( # loading from files\n ... 'gray_matter.nii.gz',\n ... 'white_matter.nii.gz',\n ... 'csf.nii.gz',\n ... 
)\n\n Intensity transforms are not applied to these images.\n\n Nearest neighbor interpolation is always used to resample label maps,\n independently of the specified interpolation type in the transform\n instantiation.\n\n See :class:`~torchio.Image` for more information.\n \"\"\"\n def __init__(self, *args, **kwargs):\n if 'type' in kwargs and kwargs['type'] != LABEL:\n raise ValueError('Type of LabelMap is always torchio.LABEL')\n kwargs.update({'type': LABEL})\n super().__init__(*args, **kwargs)\n\n def count_nonzero(self) -> int:\n \"\"\"Get the number of voxels that are not 0.\"\"\"\n return self.data.count_nonzero().item()\n\n def count_labels(self) -> Dict[int, int]:\n \"\"\"Get the number of voxels in each label.\"\"\"\n values_list = self.data.flatten().tolist()\n counter = Counter(values_list)\n counts = {label: counter[label] for label in sorted(counter)}\n return counts\n", "import copy\nimport tempfile\nimport torch\nimport numpy as np\nimport torchio as tio\nfrom ..utils import TorchioTestCase\n\n\nclass TestSubject(TorchioTestCase):\n \"\"\"Tests for `Subject`.\"\"\"\n def test_positional_args(self):\n with self.assertRaises(ValueError):\n tio.Subject(0)\n\n def test_input_dict(self):\n with tempfile.NamedTemporaryFile(delete=False) as f:\n input_dict = {'image': tio.ScalarImage(f.name)}\n tio.Subject(input_dict)\n tio.Subject(**input_dict)\n\n def test_no_sample(self):\n with tempfile.NamedTemporaryFile(delete=False) as f:\n input_dict = {'image': tio.ScalarImage(f.name)}\n subject = tio.Subject(input_dict)\n with self.assertRaises(RuntimeError):\n with self.assertWarns(UserWarning):\n tio.RandomFlip()(subject)\n\n def test_history(self):\n transformed = tio.RandomGamma()(self.sample_subject)\n self.assertIs(len(transformed.history), 1)\n\n def test_inconsistent_shape(self):\n subject = tio.Subject(\n a=tio.ScalarImage(tensor=torch.rand(1, 2, 3, 4)),\n b=tio.ScalarImage(tensor=torch.rand(2, 2, 3, 4)),\n )\n subject.spatial_shape\n with self.assertRaises(RuntimeError):\n subject.shape\n\n def test_inconsistent_spatial_shape(self):\n subject = tio.Subject(\n a=tio.ScalarImage(tensor=torch.rand(1, 3, 3, 4)),\n b=tio.ScalarImage(tensor=torch.rand(2, 2, 3, 4)),\n )\n with self.assertRaises(RuntimeError):\n subject.spatial_shape\n\n def test_plot(self):\n self.sample_subject.plot(\n show=False,\n output_path=self.dir / 'figure.png',\n cmap_dict={\n 't2': 'viridis',\n 'label': {0: 'yellow', 1: 'blue'},\n },\n )\n\n def test_plot_one_image(self):\n path = self.get_image_path('t1_plot')\n subject = tio.Subject(t1=tio.ScalarImage(path))\n subject.plot(show=False)\n\n def test_same_space(self):\n # https://github.com/fepegar/torchio/issues/381\n affine1 = np.array([\n [4.27109375e-14, -8.71264808e-03, 9.99876633e-01, -3.39850907e+01],\n [-5.54687500e-01, -2.71630469e-12, 8.75148028e-17, 1.62282930e+02],\n [2.71575000e-12, -5.54619070e-01, -1.57073092e-02, 2.28515784e+02],\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00],\n ])\n affine2 = np.array([\n [3.67499773e-08, -8.71257665e-03, 9.99876635e-01, -3.39850922e+01],\n [-5.54687500e-01, 3.67499771e-08, 6.73024385e-08, 1.62282928e+02],\n [-3.73318194e-08, -5.54619071e-01, -1.57071802e-02, 2.28515778e+02], # noqa: E501\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00],\n ])\n t = torch.rand(1, 2, 3, 4)\n subject = tio.Subject(\n im1=tio.ScalarImage(tensor=t, affine=affine1),\n im2=tio.ScalarImage(tensor=t, affine=affine2),\n )\n subject.check_consistent_space()\n\n def test_delete_image(self):\n 
subject = copy.deepcopy(self.sample_subject)\n subject.remove_image('t1')\n with self.assertRaises(KeyError):\n subject['t1']\n with self.assertRaises(AttributeError):\n subject.t1\n\n def test_2d(self):\n subject = self.make_2d(self.sample_subject)\n assert subject.is_2d()\n\n def test_different_non_numeric(self):\n with self.assertRaises(RuntimeError):\n self.sample_subject.check_consistent_attribute('path')\n\n def test_bad_arg(self):\n with self.assertRaises(ValueError):\n tio.Subject(0)\n\n def test_no_images(self):\n with self.assertRaises(ValueError):\n tio.Subject(a=0)\n" ]
[ [ "numpy.array_equal", "torch.cat", "numpy.asarray", "torch.isnan", "numpy.eye", "numpy.prod", "numpy.array", "torch.as_tensor" ], [ "numpy.array", "torch.rand" ] ]
yardenas/la-mbda
[ "17304eabb51aabd3eed43867277ee6a00ec42ce8" ]
[ "la_mbda/replay_buffer.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.mixed_precision import experimental as prec\nfrom tensorflow_probability import stats as tfps\nfrom tf_agents.replay_buffers import episodic_replay_buffer\n\nimport la_mbda.utils as utils\n\n\nclass EpisodeBuffer(object):\n def __init__(self, safety):\n self._current_episode = {'observation': [],\n 'action': [],\n 'reward': [],\n 'terminal': [],\n 'info': []}\n if safety:\n self._current_episode['cost'] = []\n\n def store(self, transition):\n if len(self._current_episode['observation']) == 0:\n for k, v in self._current_episode.items():\n if k == 'cost':\n v.append(transition['info']['cost'])\n else:\n v.append(transition[k])\n self._current_episode['observation'].append(transition['next_observation'])\n else:\n for k, v in self._current_episode.items():\n if k == 'observation':\n v.append(transition['next_observation'])\n elif k == 'cost':\n v.append(transition['info']['cost'])\n else:\n v.append(transition[k])\n\n def flush(self):\n episode_data = {k: np.array(v) for k, v in self._current_episode.items()\n if k != 'info'}\n for v in self._current_episode.values():\n v.clear()\n return episode_data\n\n\nclass ReplayBuffer(tf.Module):\n def __init__(self, safety, observation_type, observation_shape, action_shape,\n sequence_length, batch_size, seed, capacity=1000):\n super(ReplayBuffer, self).__init__()\n self._dtype = prec.global_policy().compute_dtype\n self._batch_size = batch_size\n self._sequence_length = sequence_length\n self._observation_type = observation_type\n self.observation_mean = tf.Variable(tf.zeros(observation_shape),\n dtype=np.float32, trainable=False)\n self.observation_variance = tf.Variable(tf.zeros(observation_shape),\n dtype=np.float32, trainable=False)\n self.running_episode_count = tf.Variable(0, trainable=False)\n self._current_episode = EpisodeBuffer(safety)\n self._safety = safety\n obs_dtype = tf.uint8 if observation_type in ['rgb_image', 'binary_image'] \\\n else tf.float32\n data_spec = {'observation': tf.TensorSpec(observation_shape, obs_dtype),\n 'action': tf.TensorSpec(action_shape, self._dtype),\n 'reward': tf.TensorSpec((), self._dtype),\n 'terminal': tf.TensorSpec((), self._dtype)}\n if self._safety:\n data_spec['cost'] = tf.TensorSpec((), self._dtype)\n self._buffer = episodic_replay_buffer.EpisodicReplayBuffer(\n data_spec,\n seed=seed,\n capacity=capacity,\n buffer_size=1,\n dataset_drop_remainder=True,\n completed_only=True,\n begin_episode_fn=lambda _: True,\n end_episode_fn=lambda _: True)\n self._dataset = self._buffer.as_dataset(self._batch_size,\n self._sequence_length)\n self._dataset = self._dataset.map(self._preprocess,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n self._dataset = self._dataset.prefetch(10)\n\n @property\n def episode_count(self):\n return self.running_episode_count.value()\n\n def _finalize_episode(self):\n episode_data = self._current_episode.flush()\n if self._observation_type == 'dense':\n self._update_statistics(episode_data['observation'])\n elif self._observation_type in ['rgb_image', 'binary_image']:\n bias = dict(rgb_image=0.5, binary_image=0.0).get(self._observation_type)\n episode_data['observation'] = (episode_data['observation'] + bias) * 255.0\n episode_data['observation'].astype(np.uint8)\n self.running_episode_count.assign_add(1)\n self._buffer.add_sequence(episode_data, tf.cast(self.episode_count, tf.int64))\n\n def _update_statistics(self, observation):\n tfps.assign_moving_mean_variance(\n observation,\n self.observation_mean,\n 
self.observation_variance,\n axis=0,\n zero_debias_count=self.running_episode_count)\n\n def _preprocess(self, episode, _):\n if self._observation_type == 'dense':\n episode['observation'] = utils.normalize_clip(\n tf.cast(episode['observation'], tf.float32),\n tf.convert_to_tensor(self.observation_mean),\n tf.sqrt(tf.convert_to_tensor(self.observation_variance)), 10.0)\n elif self._observation_type in ['rgb_image', 'binary_image']:\n bias = dict(rgb_image=0.5, binary_image=0.0).get(self._observation_type)\n episode['observation'] = utils.preprocess(\n tf.cast(episode['observation'], tf.float32), bias)\n else:\n raise RuntimeError(\"Invalid observation type\")\n episode['observation'] = tf.cast(episode['observation'], self._dtype)\n episode['terminal'] = episode['terminal'][:, :-1]\n episode['reward'] = episode['reward'][:, :-1]\n episode['action'] = episode['action'][:, :-1]\n if self._safety:\n episode['cost'] = episode['cost'][:, :-1]\n return episode\n\n def store(self, transition):\n self._current_episode.store(transition)\n if transition['terminal'] or transition['info'].get('TimeLimit.truncated'):\n self._finalize_episode()\n\n def sample(self, n_batches):\n return self._dataset.take(n_batches)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.Variable", "tensorflow.zeros", "tensorflow.cast", "tensorflow.keras.mixed_precision.experimental.global_policy", "numpy.array", "tensorflow.TensorSpec" ] ]
MuhammadEzzatHBK/AUG---Problem-Solving-For-Bioinformatics-Level-1-
[ "55e129c0c5c6441f88775bd4eab6e67fface77eb" ]
[ "Session 6/GC-Skew.py" ]
[ "import matplotlib.pyplot as plt\n\nseq = \"TTGATTACCTTATTTGATCATTACACATTGTACGCTTGTGTCAAAATATCACATGTGCCT\"\nC = 0\nG = 0\nGCSkew = []\n\nfor ch in seq:\n if ch == 'C':\n C += 1\n elif ch == 'G':\n G += 1\n if G != 0 or C != 0:\n GCSkew.append((G-C)/(G+C))\n else:\n GCSkew.append(0)\n\nplt.plot(range(len(seq)), GCSkew)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
m0000Amir/BST_BS_types_set
[ "c455aaafa632a0de8a1075262097f042993721c7" ]
[ "bab/evaluation.py" ]
[ "\"\"\"\nCALCULATION OF EVALUATIONS\n- node noncoverage;\n- node cost;\n- node delay\n\"\"\"\nfrom .bst import Node\nfrom typing import Tuple, Any\n\nimport numpy as np\n\n\ndef noncov_btwn_sta(place1: float, place2: float,\n cov1: float, cov2: float) -> float:\n \"\"\"\n Calculate noncoverage between place1 and place2\n :param place1:\n :param place2:\n :param cov1:\n :param cov2:\n :return: Noncoverage between two placed station\n \"\"\"\n dist = abs(place2 - place1)\n cov = cov1 + cov2\n return max([dist - cov, 0])\n\n\ndef solve_noncoverage(p: int, s: int, node: Node, gtw: Tuple[float],\n place: Tuple[Any], cov: Tuple[Any]) -> float:\n \"\"\"\n Calculate estimates of noncoverage\n :param p: index of placement\n :param s: index of station\n :param node: parent node\n :param gtw: gateways coordinates\n :param place: placement coordinates\n :param cov: stations coverage radius\n\n :return: Noncoverage\n \"\"\"\n i, j = np.where(node.pi == 1)\n\n if len(i) == 0: # searching is just started\n left_sta_place = gtw[0]\n left_sta_cov = 0\n else:\n left_sta_place = place[i[-1]]\n left_sta_cov = cov[j[-1]]\n\n # left noncoverage\n left_noncov = node.noncov.left + noncov_btwn_sta(left_sta_place,\n place[p],\n left_sta_cov,\n cov[s])\n # right noncoverage\n unbusy_sta_cov = [cov[i] for i in range(len(cov))\n if (i not in j) and (i != s)]\n\n unbusy_place = [place[j] for j in range(len(place))\n if (j not in i) and (j != p)]\n\n if len(unbusy_sta_cov) > len(unbusy_place):\n sort_cov = unbusy_sta_cov\n sort_cov.sort()\n unbusy_sta_cov = sort_cov[-1:-(len(unbusy_place) + 1):-1]\n\n right_noncov = noncov_btwn_sta(place[p],\n gtw[-1],\n cov[s],\n sum(2 * unbusy_sta_cov))\n # node noncoverage\n node.left_child.noncov.left = left_noncov\n node.left_child.noncov.right = max((gtw[-1] - place[p]) - cov[s], 0)\n return left_noncov + right_noncov\n\n\ndef solve_cost(node: Node, cost: float) -> float:\n return node.cost + cost\n\n\ndef solve_delay(node: Node, arrival_rate: float,\n average_packet_size: float, throughput: float) -> float:\n \"\"\"\n Colving node time delay.\n\n Let's accept the assumption that any station represent as M/M/1 queue, where\n arrivals are determined by a Poisson process and serivice times have an\n exponentional distribution.\n\n According to Burke's theorem at the exit from the node we also have a\n Poisson flow with arrival rate that is equal to sum of arrival rates of all\n incoming flows.\n\n \"\"\"\n departure_rate = throughput / average_packet_size\n # Amount of all placed station is\n _, j = np.where(node.left_child.pi == 1)\n placed_sta_amount = len(j)\n # By Burke's total arrival rate is\n total_arrival_rate = arrival_rate * placed_sta_amount\n\n rho = total_arrival_rate / departure_rate\n # rho must be less than 0.9 (rho < 1 theoretically)\n if rho <= 0.9:\n # By Little's law the average time delay at each station is\n mean_system_size = rho / (1 - rho)\n mean_service_time = round(mean_system_size / total_arrival_rate, 5)\n return node.delay + mean_service_time\n else:\n return float('inf')\n" ]
[ [ "numpy.where" ] ]
i-usalko/torri
[ "4e533d8e13cd9db7694c9983c73f874856e4cd69" ]
[ "tests/test_methods.py" ]
[ "import os\nimport unittest\nfrom torri import Torri, TorriException\nfrom timeit import default_timer as timer\nfrom cv2 import cv2\nimport numpy as np\n\n\nclass TestMethods(unittest.TestCase):\n\n def test_case_one(self):\n t = Torri()\n with self.assertRaises(TorriException):\n obj = t.read_file_with_mmap('Not exist path!')\n print(obj)\n\n def test_case_two(self):\n t = Torri()\n print(t.gencmd('get_throttled'))\n self.assertTrue(True)\n\n @unittest.skip # Manual run only\n def test_case_read_without_mmap(self):\n t = Torri()\n time = timer()\n data = t.decode_jpeg('/media/pi/Transcend/.mock-camera-images/2020-06-22-07-10-39.72866b38fcdb4b8ba0c76f2ba48d7c67-v.jpg', 1920, 1080, use_mmap=False)\n print(f'Execution time decoding is {timer() - time}s')\n #width = 1920\n #height = 1080\n #size = width * height\n gbr24_data = np.array(data, copy=False, dtype=np.uint8)\n gbr24_data = gbr24_data.reshape((1080, 1920, 3))\n print(f'Execution time total is {timer() - time}s')\n\n success, image_byte_array = cv2.imencode('.jpeg',\n gbr24_data,\n [cv2.IMWRITE_JPEG_QUALITY, 100])\n with open('test.jpeg', 'wb') as writer:\n writer.write(image_byte_array)\n writer.flush()\n os.fsync(writer.fileno())\n\n self.assertTrue(True)\n\n @unittest.skip # Manual run only\n def test_case_control(self):\n time = timer()\n with open('/media/pi/Transcend/.mock-camera-images/2020-06-22-07-10-39.72866b38fcdb4b8ba0c76f2ba48d7c67-v.jpg', 'rb') as reader:\n image_data = np.frombuffer(reader.read(), dtype=np.uint8)\n gbr24_data = cv2.imdecode(image_data, 1)\n print(f'Execution time is {timer() - time}s')\n print(f'RGB data shape is : {gbr24_data.shape}')\n\n success, image_byte_array = cv2.imencode('.jpeg',\n gbr24_data,\n [cv2.IMWRITE_JPEG_QUALITY, 100])\n with open('test-control.jpeg', 'wb') as writer:\n writer.write(image_byte_array)\n writer.flush()\n os.fsync(writer.fileno())\n\n self.assertTrue(True)\n\n def test_case_del(self):\n t = Torri()\n obj = t.decode_jpeg('Not exist path!', 1920, 1080)\n print(obj)\n t.free(obj)\n print('Ok it\\'s free')\n print(obj)\n self.assertTrue(True)\n\n def test_read_file_with_mmap(self):\n t = Torri()\n with self.assertRaises(TorriException):\n obj = t.read_file_with_mmap('Not exist path!', use_mmap=True)\n print(obj)\n\n @unittest.skip # Manual run only\n def test_read_file_with_mmap(self):\n t = Torri()\n time = timer()\n obj = t.read_file_with_mmap('/media/pi/Transcend/.mock-camera-images/2020-06-22-07-10-39.72866b38fcdb4b8ba0c76f2ba48d7c67-v.jpg')\n print(f'Execution time read with mmap is {timer() - time}s')\n print(obj)\n self.assertTrue(True)\n\n @unittest.skip # Manual run only\n def test_case_mmal_and_mmap(self):\n t = Torri()\n time = timer()\n data = t.decode_jpeg('/media/pi/Transcend/.mock-camera-images/2020-06-22-07-10-39.72866b38fcdb4b8ba0c76f2ba48d7c67-v.jpg', 1920, 1080, use_mmap=True)\n print(f'Execution time decoding is {timer() - time}s')\n gbr24_data = np.array(data, copy=False, dtype=np.uint8)\n gbr24_data = gbr24_data.reshape((1080, 1920, 3))\n print(f'Execution time total is {timer() - time}s')\n\n success, image_byte_array = cv2.imencode('.jpeg',\n gbr24_data,\n [cv2.IMWRITE_JPEG_QUALITY, 100])\n with open('test-mmal-and-mmap.jpeg', 'wb') as writer:\n writer.write(image_byte_array)\n writer.flush()\n os.fsync(writer.fileno())\n\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
midhunexeter/sklearn-porter
[ "945a67a1509b5c42d83e2a2f4a82d20aee2cfbce" ]
[ "tests/estimator/classifier/ExtraTreesClassifier/ExtraTreesClassifierJSTest.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\n\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nfrom tests.estimator.classifier.Classifier import Classifier\nfrom tests.language.JavaScript import JavaScript\n\n\nclass ExtraTreesClassifierJSTest(JavaScript, Classifier, TestCase):\n\n def setUp(self):\n super(ExtraTreesClassifierJSTest, self).setUp()\n self.estimator = ExtraTreesClassifier(random_state=0)\n\n def tearDown(self):\n super(ExtraTreesClassifierJSTest, self).tearDown()\n" ]
[ [ "sklearn.ensemble.ExtraTreesClassifier" ] ]
anonymous-authors-iclr2022-481/ltcl
[ "0d8902228fa6c37f875bb60c4d16988462a9655a" ]
[ "ltcl/tools/gen_ball.py" ]
[ "import argparse\nimport torchvision.transforms as transforms\n\nimport os\nimport numpy as np\nfrom ltcl.datasets.physics_dataset import PhysicsDataset\nfrom ltcl.tools.utils import load_yaml\nimport yaml\nimport ipdb as pdb\n\nclass Namespace(object):\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n def __repr__(self):\n items = list(self.__dict__.items())\n temp = []\n for name, value in items:\n if not name.startswith('_'):\n temp.append('%s=%r' % (name, value))\n temp.sort()\n return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))\n\ndef main(args):\n assert args.exp is not None, \"FATAL: \"+__file__+\": You must specify an exp config file (e.g., *.yaml)\"\n\n script_dir = os.path.dirname(__file__)\n rel_path = os.path.join('../configs', \n '%s.yaml'%args.exp)\n abs_file_path = os.path.abspath(os.path.join(script_dir, rel_path))\n\n cfg = load_yaml(abs_file_path)\n\n print(\"######### Configuration #########\")\n print(yaml.dump(cfg, default_flow_style=False))\n print(\"#################################\")\n # Genenrate args\n namespace = Namespace()\n for k in cfg:\n setattr(namespace, k, cfg[k])\n\n trans_to_tensor = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n sparsity = 0.67\n n_ball = cfg['n_ball']\n param_load = None\n if not cfg['variable_rels']:\n param_load = np.zeros((n_ball * (n_ball - 1) // 2, 2))\n n_rels = len(param_load)\n num_nonzero = int(n_rels * sparsity)\n choice = np.random.choice(n_rels, size=num_nonzero, replace=False)\n param_load[choice, 0] = 1\n param_load[choice, 1] = np.random.rand(num_nonzero) * 10\n\n datasets = {}\n # modMat = np.random.uniform(0, 1, (cfg['n_ball'], 2, cfg['n_class']))\n modMat = np.ones((cfg['n_ball'], 2, cfg['n_class']))\n for phase in range(cfg['n_class']):\n datasets[phase] = PhysicsDataset(namespace, phase=str(phase), trans_to_tensor=trans_to_tensor)\n datasets[phase].gen_data(modVec=modMat[:,:,phase], param_load=param_load)\n\nif __name__ == \"__main__\":\n\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument(\n '-e',\n '--exp',\n type=str\n )\n args = argparser.parse_args()\n main(args)\n\n\n\n" ]
[ [ "numpy.zeros", "numpy.random.rand", "numpy.random.choice", "numpy.ones" ] ]
kw-lee/pytorch-DCRNN
[ "49fbf64705360f2d2586a94fb1bde1b2f9599a70" ]
[ "model/dcrnn_cell.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nfrom base import BaseModel\n\n\nclass DiffusionGraphConv(BaseModel):\n def __init__(self, input_dim, hid_dim, max_diffusion_step, output_dim, filter_type=\"random_walk\", bias_start=0.0, sparse_supports=True):\n super(DiffusionGraphConv, self).__init__(filter_type=filter_type,\n sparse_supports=sparse_supports)\n # NUM_MATRICES: ORDER\n self.num_matrices = self._len_supports * max_diffusion_step + 1 # Don't forget to add for x itself.\n input_size = input_dim + hid_dim\n self._max_diffusion_step = max_diffusion_step\n self.weight = nn.Parameter(torch.FloatTensor(size=(input_size*self.num_matrices, output_dim)))\n self.biases = nn.Parameter(torch.FloatTensor(size=(output_dim,)))\n nn.init.xavier_normal_(self.weight.data, gain=1.414)\n nn.init.constant_(self.biases.data, val=bias_start)\n self._mm = torch.sparse.mm if self._sparse_supports else torch.mm\n\n @staticmethod\n def _concat(x, x_):\n x_ = torch.unsqueeze(x_, 0)\n return torch.cat([x, x_], dim=0)\n\n def forward(self, inputs, state, output_size, supports):\n \"\"\"\n Diffusion Graph convolution with graph matrix\n :param inputs: (time_dim, batch_size, num_nodes, input_dim)\n :param state:\n :param output_size:\n :param bias_start:\n :return:\n \"\"\"\n # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)\n batch_size = inputs.shape[0]\n num_nodes = supports[0].shape[0]\n inputs = torch.reshape(inputs, (batch_size, num_nodes, -1))\n state = torch.reshape(state, (batch_size, num_nodes, -1))\n inputs_and_state = torch.cat([inputs, state], dim=2) # (batch_size, num_nodes, input_DIM + hidden_dim)\n input_size = inputs_and_state.shape[2]\n \n # dtype = inputs.dtype\n\n x = inputs_and_state\n x0 = torch.transpose(x, dim0=0, dim1=1)\n x0 = torch.transpose(x0, dim0=1, dim1=2) # (num_nodes, total_arg_size, batch_size)\n x0 = torch.reshape(x0, shape=[num_nodes, input_size * batch_size])\n x = torch.unsqueeze(x0, dim=0)\n\n if self._max_diffusion_step == 0:\n pass\n else:\n for support in supports:\n x1 = self._mm(support, x0)\n x = self._concat(x, x1)\n for k in range(2, self._max_diffusion_step + 1):\n x2 = 2 * self._mm(support, x1) - x0\n x = self._concat(x, x2)\n x1, x0 = x2, x1\n\n x = torch.reshape(x, shape=[self.num_matrices, num_nodes, input_size, batch_size])\n x = torch.transpose(x, dim0=0, dim1=3) # (batch_size, num_nodes, input_size, order)\n \n x = torch.reshape(x, shape=[batch_size * num_nodes, input_size * self.num_matrices])\n\n\n x = torch.matmul(x, self.weight) # (batch_size * num_nodes, output_size)\n x = torch.add(x, self.biases)\n # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)\n return torch.reshape(x, [batch_size, num_nodes * output_size])\n\nclass DCGRUCell(BaseModel):\n \"\"\"\n Graph Convolution Gated Recurrent Unit Cell.\n \"\"\"\n def __init__(self, input_dim, num_units, max_diffusion_step,\n num_proj=None, activation=torch.tanh, use_gc_for_ru=True, \n filter_type=\"random_walk\", \n sparse_supports=True):\n \"\"\"\n :param num_units: the hidden dim of rnn\n :param adj_mat: the (weighted) adjacency matrix of the graph, in numpy ndarray form\n :param max_diffusion_step: the max diffusion step\n :param num_nodes:\n :param num_proj: num of output dim, defaults to 1 (speed)\n :param activation: if None, don't do activation for cell state\n :param use_gc_for_ru: decide whether to use graph 
convolution inside rnn\n \"\"\"\n super(DCGRUCell, self).__init__(filter_type=filter_type,\n sparse_supports=sparse_supports)\n self._activation = activation\n self._num_units = num_units\n self._max_diffusion_step = max_diffusion_step\n self._num_proj = num_proj\n self._use_gc_for_ru = use_gc_for_ru\n self._input_dim = input_dim\n\n # supports = utils.calculate_scaled_laplacian(adj_mat, lambda_max=None) # scipy coo matrix\n # supports = self._build_sparse_matrix(supports).cuda() # to pytorch sparse tensor\n\n self.dconv_gate = DiffusionGraphConv(input_dim=self._input_dim,\n hid_dim=num_units,\n max_diffusion_step=max_diffusion_step,\n output_dim=num_units*2,\n bias_start=1.0,\n filter_type=self._filter_type,\n sparse_supports=self._sparse_supports)\n self.dconv_candidate = DiffusionGraphConv(input_dim=self._input_dim,\n hid_dim=num_units,\n max_diffusion_step=max_diffusion_step,\n output_dim=num_units,\n bias_start=1.0,\n filter_type=self._filter_type,\n sparse_supports=self._sparse_supports)\n if num_proj is not None:\n self.project = nn.Linear(self._num_units, self._num_proj)\n\n # @property\n # def output_size(self):\n # output_size = self._num_nodes * self._num_units\n # if self._num_proj is not None:\n # output_size = self._num_nodes * self._num_proj\n # return output_size\n\n def forward(self, inputs, state, supports):\n \"\"\"\n :param inputs: (B, num_nodes * input_dim)\n :param state: (B, num_nodes * num_units)\n :return:\n \"\"\"\n output_size = 2 * self._num_units\n num_nodes = supports[0].shape[0]\n # we start with bias 1.0 to not reset and not update\n if self._use_gc_for_ru:\n fn = self.dconv_gate\n else:\n fn = self._fc\n \n value = torch.sigmoid(fn(inputs, state, output_size, supports))\n value = torch.reshape(value, (-1, num_nodes, output_size))\n r, u = torch.split(value, split_size_or_sections=int(output_size/2), dim=-1)\n r = torch.reshape(r, (-1, num_nodes * self._num_units))\n u = torch.reshape(u, (-1, num_nodes * self._num_units))\n c = self.dconv_candidate(inputs, r * state, self._num_units, supports) # batch_size, num_nodes * output_size\n if self._activation is not None:\n c = self._activation(c)\n output = new_state = u * state + (1 - u) * c\n if self._num_proj is not None:\n # apply linear projection to state\n batch_size = inputs.shape[0]\n output = torch.reshape(new_state, shape=(-1, self._num_units)) # (batch*num_nodes, num_units)\n output = torch.reshape(self.project(output), shape=(batch_size, -1)) # (50, 207*1)\n return output, new_state\n\n @staticmethod\n def _concat(x, x_):\n x_ = torch.unsqueeze(x_, 0)\n return torch.cat([x, x_], dim=0)\n\n def _gconv(self, inputs, state, output_size, bias_start=0.0):\n pass\n\n def _fc(self, inputs, state, output_size, bias_start=0.0):\n pass\n\n def init_hidden(self, batch_size, num_nodes, device=torch.device('cpu')):\n # state: (B, num_nodes * num_units)\n return torch.zeros(batch_size, num_nodes * self._num_units, device=device)\n" ]
[ [ "torch.transpose", "torch.add", "torch.cat", "torch.nn.init.constant_", "torch.zeros", "torch.reshape", "torch.nn.init.xavier_normal_", "torch.unsqueeze", "torch.matmul", "torch.nn.Linear", "torch.FloatTensor", "torch.device" ] ]
kaland313/A3C-CarRacingGym
[ "62fe7e033a0fc4a58101e8a06716cbf386c8bd9f" ]
[ "Scripts/CartPole/a3c_cartpole.py" ]
[ "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nimport threading\nimport gym\nimport multiprocessing\nimport numpy as np\nfrom queue import Queue\nimport argparse\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.python import keras\nfrom tensorflow.python.keras import layers\n\ntf.enable_eager_execution()\n\n#############################################################\n# Command line argument parser\n#############################################################\nparser = argparse.ArgumentParser(description='Run A3C algorithm on the game '\n 'Cartpole.')\nparser.add_argument('--algorithm', default='a3c', type=str,\n help='Choose between \\'a3c\\' and \\'random\\'.')\nparser.add_argument('--train', dest='train', action='store_true',\n help='Train our model.')\nparser.add_argument('--lr', default=0.001,\n help='Learning rate for the shared optimizer.')\nparser.add_argument('--update-freq', default=20, type=int,\n help='How often to update the global model.')\nparser.add_argument('--max-eps', default=1000, type=int,\n help='Global maximum number of episodes to run.')\nparser.add_argument('--gamma', default=0.99,\n help='Discount factor of rewards.')\nparser.add_argument('--save-dir', default='../../Outputs/', type=str,\n help='Directory in which you desire to save the model.')\nargs = parser.parse_args()\n\n\n#############################################################\n# Helper functions\n#############################################################\n\ndef record(episode, episode_reward, worker_idx, global_ep_reward, result_queue, total_loss, num_steps):\n \"\"\"Helper function to store score and print statistics.\n\n :param episode: Current episode\n :param episode_reward: Reward accumulated over the current episode\n :param worker_idx: Which thread (worker)\n :param global_ep_reward: The moving average of the global reward\n :param result_queue: Queue storing the moving average of the scores\n :param total_loss: The total loss accumualted over the current episode\n :param num_steps: The number of steps the episode took to complete\n \"\"\"\n if global_ep_reward == 0:\n global_ep_reward = episode_reward\n else:\n global_ep_reward = global_ep_reward * 0.99 + episode_reward * 0.01\n print(\n 'Episode: ' + str(episode) +' | ' +\n 'Moving Average Reward: ' + str(int(global_ep_reward)) + ' | ' +\n 'Episode Reward: ' +str(int(episode_reward)) + ' | ' +\n 'Loss: ' + str(int(total_loss / float(num_steps) * 1000) / 1000) + ' | ' +\n 'Steps: ' + str(num_steps) + ' | ' +\n 'Worker: ' + str(worker_idx)\n )\n result_queue.put(global_ep_reward)\n return global_ep_reward\n\n\n#############################################################\n# Random agent\n#############################################################\nclass RandomAgent:\n \"\"\"Random Agent that will play the specified game.\n\n :param env_name: Name of the environment to be played\n :param max_eps: Maximum number of episodes to run agent for.\n \"\"\"\n\n def __init__(self, env_name, max_eps):\n self.env = gym.make(env_name)\n self.max_episodes = max_eps\n self.global_moving_average_reward = 0\n self.res_queue = Queue()\n\n def run(self):\n reward_avg = 0\n for episode in range(self.max_episodes):\n done = False\n self.env.reset()\n reward_sum = 0.0\n steps = 0\n while not done:\n # Sample randomly from the action space and step\n _, reward, done, _ = self.env.step(self.env.action_space.sample())\n steps += 1\n reward_sum += reward\n # Record statistics\n self.global_moving_average_reward = 
record(episode,\n reward_sum,\n 0,\n self.global_moving_average_reward,\n self.res_queue, 0, steps)\n\n reward_avg += reward_sum\n final_avg = reward_avg / float(self.max_episodes)\n print(\"Average score across {} episodes: {}\".format(self.max_episodes, final_avg))\n return final_avg\n\n\n#############################################################\n# Running the random agent\n#############################################################\n# randomAgent = RandomAgent('CartPole-v0', 4000)\n# randomAgent.run()\n\n\n#############################################################\n# Actor-critic model definition\n#############################################################\nclass ActorCriticModel(keras.Model):\n def __init__(self, state_size, action_size):\n super(ActorCriticModel, self).__init__()\n self.state_size = state_size\n self.action_size = action_size\n self.dense1 = layers.Dense(100, activation='relu')\n self.policy_logits = layers.Dense(action_size)\n self.dense2 = layers.Dense(100, activation='relu')\n self.values = layers.Dense(1)\n\n def call(self, inputs):\n # Forward pass\n x = self.dense1(inputs)\n logits = self.policy_logits(x)\n v1 = self.dense2(inputs)\n values = self.values(v1)\n return logits, values\n\n\n#############################################################\n# Master agent definition\n#############################################################\nclass MasterAgent():\n def __init__(self):\n # Logging directory setup\n self.game_name = 'CartPole-v0'\n save_dir = args.save_dir\n self.save_dir = save_dir\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Get input and output parameters and instantiate global network\n env = gym.make(self.game_name)\n self.state_size = env.observation_space.shape[0]\n self.action_size = env.action_space.n\n print(self.state_size, self.action_size)\n self.global_model = ActorCriticModel(self.state_size, self.action_size) # global network\n self.global_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32))\n\n # Instantiate optimizer\n self.opt = tf.train.AdamOptimizer(args.lr, use_locking=True)\n\n def train(self):\n if args.algorithm == 'random':\n random_agent = RandomAgent(self.game_name, args.max_eps)\n random_agent.run()\n return\n\n res_queue = Queue()\n\n workers = [Worker(self.state_size, self.action_size, self.global_model, self.opt,\n res_queue,\n i, game_name=self.game_name,\n save_dir=self.save_dir) for i in range(multiprocessing.cpu_count())]\n\n for i, worker in enumerate(workers):\n print(\"Starting worker {}\".format(i))\n worker.start()\n\n moving_average_rewards = [] # record episode reward to plot\n while True:\n reward = res_queue.get()\n if reward is not None:\n moving_average_rewards.append(reward)\n else:\n break\n [w.join() for w in workers]\n\n plt.plot(moving_average_rewards)\n plt.ylabel('Moving average ep reward')\n plt.xlabel('Step')\n plt.savefig(os.path.join(self.save_dir,\n '{} Moving Average.png'.format(self.game_name)))\n plt.show()\n\n def play(self):\n env = gym.make(self.game_name).unwrapped\n state = env.reset()\n model = self.global_model\n model_path = os.path.join(self.save_dir, 'model_{}.h5'.format(self.game_name))\n print('Loading model from: {}'.format(model_path))\n model.load_weights(model_path)\n done = False\n step_counter = 0\n reward_sum = 0\n\n try:\n while not done:\n env.render(mode='rgb_array')\n policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))\n policy = tf.nn.softmax(policy)\n action = np.argmax(policy)\n 
state, reward, done, _ = env.step(action)\n reward_sum += reward\n print(\"{}. Reward: {}, action: {}\".format(step_counter, reward_sum, action))\n step_counter += 1\n except KeyboardInterrupt:\n print(\"Received Keyboard Interrupt. Shutting down.\")\n finally:\n env.close()\n\n\n#############################################################\n# Memory class\n#############################################################\nclass Memory:\n def __init__(self):\n self.states = []\n self.actions = []\n self.rewards = []\n\n def store(self, state, action, reward):\n self.states.append(state)\n self.actions.append(action)\n self.rewards.append(reward)\n\n def clear(self):\n self.states = []\n self.actions = []\n self.rewards = []\n\n\n#############################################################\n# Worker agent definition\n#############################################################\nclass Worker(threading.Thread):\n # Set up global variables across different threads\n global_episode = 0\n # Moving average reward\n global_moving_average_reward = 0\n best_score = 0\n save_lock = threading.Lock()\n\n def __init__(self, state_size, action_size, global_model, opt, result_queue, idx, game_name='CartPole-v0',\n save_dir='/tmp'):\n super(Worker, self).__init__()\n self.state_size = state_size\n self.action_size = action_size\n self.result_queue = result_queue\n self.global_model = global_model\n self.opt = opt\n self.local_model = ActorCriticModel(self.state_size, self.action_size)\n self.worker_idx = idx\n self.game_name = game_name\n self.env = gym.make(self.game_name).unwrapped\n self.save_dir = save_dir\n self.ep_loss = 0.0\n\n def run(self):\n total_step = 1\n mem = Memory()\n while Worker.global_episode < args.max_eps:\n current_state = self.env.reset() #Returns: observation (object): the initial observation of the env\n mem.clear()\n ep_reward = 0.\n ep_steps = 0\n self.ep_loss = 0\n\n time_count = 0\n done = False\n while not done:\n logits, _ = self.local_model(\n tf.convert_to_tensor(current_state[None, :],\n dtype=tf.float32))\n probs = tf.nn.softmax(logits)\n\n action = np.random.choice(self.action_size, p=probs.numpy()[0])\n new_state, reward, done, _ = self.env.step(action)\n print(\"new_state\", new_state.shape)\n if done:\n reward = -1\n ep_reward += reward\n mem.store(current_state, action, reward)\n\n if time_count == args.update_freq or done:\n # Calculate gradient wrt to local model. 
We do so by tracking the\n # variables involved in computing the loss by using tf.GradientTape\n print(\"GRAD new_state\", new_state.shape)\n with tf.GradientTape() as tape:\n total_loss = self.compute_loss(done,\n new_state,\n mem,\n args.gamma)\n self.ep_loss += total_loss\n # Calculate local gradients\n grads = tape.gradient(total_loss, self.local_model.trainable_weights)\n # Push local gradients to global model\n self.opt.apply_gradients(zip(grads,\n self.global_model.trainable_weights))\n # Update local model with new weights\n self.local_model.set_weights(self.global_model.get_weights())\n\n mem.clear()\n time_count = 0\n\n if done: # done and print information\n Worker.global_moving_average_reward = \\\n record(Worker.global_episode, ep_reward, self.worker_idx,\n Worker.global_moving_average_reward, self.result_queue,\n self.ep_loss, ep_steps)\n # We must use a lock to save our model and to print to prevent data races.\n if ep_reward > Worker.best_score:\n with Worker.save_lock:\n print(\"Saving best model to {}, \"\n \"episode score: {}\".format(self.save_dir, ep_reward))\n self.global_model.save_weights(\n os.path.join(self.save_dir,\n 'model_{}.h5'.format(self.game_name))\n )\n Worker.best_score = ep_reward\n Worker.global_episode += 1\n ep_steps += 1\n\n time_count += 1\n current_state = new_state\n total_step += 1\n self.result_queue.put(None)\n\n def compute_loss(self,\n done,\n new_state,\n memory,\n gamma=0.99):\n if done:\n reward_sum = 0. # terminal\n else:\n reward_sum = self.local_model(\n tf.convert_to_tensor(new_state[None, :],\n dtype=tf.float32))[-1].numpy()[0]\n\n # Get discounted rewards\n discounted_rewards = []\n for reward in memory.rewards[::-1]: # reverse buffer r\n reward_sum = reward + gamma * reward_sum\n discounted_rewards.append(reward_sum)\n discounted_rewards.reverse()\n\n print(\"memory shape\", memory.states.shape)\n logits, values = self.local_model(\n tf.convert_to_tensor(np.vstack(memory.states),\n dtype=tf.float32))\n # Get our advantages\n advantage = tf.convert_to_tensor(np.array(discounted_rewards)[:, None],\n dtype=tf.float32) - values\n # Value loss\n value_loss = advantage ** 2\n\n # Calculate our policy loss\n actions_one_hot = tf.one_hot(memory.actions, self.action_size, dtype=tf.float32)\n\n policy = tf.nn.softmax(logits)\n entropy = tf.reduce_sum(policy * tf.log(policy + 1e-20), axis=1)\n\n policy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=actions_one_hot,\n logits=logits)\n policy_loss *= tf.stop_gradient(advantage)\n policy_loss -= 0.01 * entropy\n total_loss = tf.reduce_mean((0.5 * value_loss + policy_loss))\n return total_loss\n\n\n#############################################################\n# The main\n#############################################################\nif __name__ == '__main__':\n print(args)\n master = MasterAgent()\n if args.train:\n master.train()\n else:\n master.play()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.enable_eager_execution", "tensorflow.python.keras.layers.Dense", "matplotlib.pyplot.plot", "tensorflow.train.AdamOptimizer", "tensorflow.stop_gradient", "numpy.argmax", "tensorflow.one_hot", "numpy.array", "matplotlib.pyplot.show", "tensorflow.GradientTape", "matplotlib.pyplot.ylabel", "tensorflow.nn.softmax", "numpy.random.random", "tensorflow.reduce_mean", "tensorflow.log", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "matplotlib.pyplot.xlabel", "numpy.vstack" ] ]
fgsect/fexm
[ "ca6629bbcbf79639871d3ec52bc2a7de9ae453a4" ]
[ "fexm/evalscripts/dependency_graph.py" ]
[ "import sys\n\nimport networkx as nx\nimport pandas as pd\n\n\ndef main(package_csv: str, package: str):\n package_dict = {}\n count = 0\n df = pd.read_csv(package_csv)\n df.fillna(\"\")\n dep_graph = nx.DiGraph()\n for index, row in df.iterrows():\n dependencies = str(row[\"depends\"]).split(\" \") if str(row[\"depends\"]) != \"nan\" else []\n dependencies += str(row[\"makedepends\"]).split(\" \") if str(row[\"makedepends\"]) != \"nan\" else []\n dependencies += str(row[\"opt_depends\"]).split(\" \") if str(row[\"opt_depends\"]) != \"nan\" else []\n dependencies = set(dependencies)\n if row[\"package\"] not in package_dict:\n package_dict[row[\"package\"]] = row[\"package\"]\n count += 1\n dep_graph.add_node(package_dict[row[\"package\"]])\n for dep in dependencies:\n if dep not in package_dict:\n package_dict[dep] = dep\n count += 1\n dep_graph.add_node(package_dict[dep])\n dep_graph.add_edge(package_dict[row[\"package\"]], package_dict[dep])\n print(\"Graph done! ######\")\n depend_counter = 0\n for index, row in df.iterrows():\n if package in nx.neighbors(dep_graph, package_dict[row[\"package\"]]):\n print(\"{0} directly depends on {1}\".format(row[\"package\"], package))\n depend_counter += 1\n # print(\"For \",row[\"package\"])\n # print(nx.descendants(dep_graph,package_dict[row[\"package\"]]))\n print(\"{0} is used in \".format(package), depend_counter, \"packages\")\n # nx.draw(dep_graph)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv[2])\n" ]
[ [ "pandas.read_csv" ] ]
twangnh/Distilling-Object-Detectors-Shuffledet
[ "6ad638451aac52cc6f4fb94944c0c394a5ad6139" ]
[ "lib/config/kitti_shuffledet_config.py" ]
[ "import numpy as np\n\nfrom config import base_model_config\n\ndef kitti_shuffledet_config():\n \"\"\"Specify the parameters to tune below.\"\"\"\n mc = base_model_config('KITTI')\n\n mc.IMAGE_WIDTH = 1248\n mc.IMAGE_HEIGHT = 384\n # mc.IMAGE_WIDTH = 560\n # mc.IMAGE_HEIGHT = 180\n\n mc.BATCH_SIZE = 8\n\n mc.WEIGHT_DECAY = 0.0005\n mc.LEARNING_RATE = 0.01\n mc.DECAY_STEPS = 10000\n mc.MAX_GRAD_NORM = 1.0\n mc.MOMENTUM = 0.9\n mc.LR_DECAY_FACTOR = 0.5\n\n mc.LOSS_COEF_BBOX = 5.0\n mc.LOSS_COEF_CONF_POS = 75.0\n mc.LOSS_COEF_CONF_NEG = 100.0\n mc.LOSS_COEF_CLASS = 1.0\n\n mc.PLOT_PROB_THRESH = 0.4\n mc.NMS_THRESH = 0.4\n mc.PROB_THRESH = 0.005\n mc.TOP_N_DETECTION = 64\n\n mc.DATA_AUGMENTATION = True\n # mc.DRIFT_X = 150/(1248./560)\n # mc.DRIFT_Y = 100/(384./180)\n mc.DRIFT_X = 150\n mc.DRIFT_Y = 100\n mc.EXCLUDE_HARD_EXAMPLES = False\n\n mc.ANCHOR_BOX = set_anchors(mc)\n mc.ANCHORS = len(mc.ANCHOR_BOX)\n mc.ANCHOR_PER_GRID = 9\n\n return mc\n\ndef set_anchors(mc):\n H, W, B = mc.IMAGE_HEIGHT // 16, mc.IMAGE_WIDTH // 16, 9\n #H, W, B = 12, 35, 9\n #original anchors\n anchor_shape_base = np.array(\n [[ 36., 37.], [ 366., 174.], [ 115., 59.],\n [ 162., 87.], [ 38., 90.], [ 258., 173.],\n [ 224., 108.], [ 78., 170.], [ 72., 43.]])\n\n # randomly modified anchors\n # anchor_shape_base = np.array(\n # [[ 50., 50.], [ 320., 180.], [ 90., 48.],\n # [ 180., 100.], [ 50., 120.], [ 200., 130.],\n # [ 180., 80.], [ 90., 190.], [ 100., 60.]])\n\n # anchor_shape_base = np.array(\n # [[20.63007745, 45.40804647],\n # [69.9036478, 153.81476415],\n # [135.64310606, 213.72166667],\n # [39.594868, 86.59731785],\n # [209.20414977, 127.49268851],\n # [75.59330804, 47.45570814],\n # [337.28631668, 174.02375953],\n # [130.50749455, 72.92875091],\n # [38.78412702, 28.50398895]])\n\n\n anchor_shapes = np.reshape(\n [anchor_shape_base] * H * W,\n (H, W, B, 2)\n )\n center_x = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B), \n (B, H, W)\n ),\n (1, 2, 0)\n ),\n (H, W, B, 1)\n )\n center_y = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),\n (B, W, H)\n ),\n (2, 1, 0)\n ),\n (H, W, B, 1)\n )\n anchors = np.reshape(\n np.concatenate((center_x, center_y, anchor_shapes), axis=3),\n (-1, 4)\n )\n\n return anchors\n" ]
[ [ "numpy.reshape", "numpy.arange", "numpy.array", "numpy.concatenate" ] ]
asijit123/Python
[ "30050ab3aa7f89eb75e142bd5dfc9987861284a6" ]
[ "pan.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom matplotlib import *\n\n#.........................Series.......................#\n\nx1=np.array([1,2,3,4])\ns=pd.Series(x1,index=[1,2,3,4])\nprint(s)\n\n#.......................DataFrame......................#\n\nx2=np.array([1,2,3,4,5,6])\ns=pd.DataFrame(x2)\nprint(s)\n\nx3=np.array([['Alex',10],['Nishit',21],['Aman',22]])\ns=pd.DataFrame(x3,columns=['Name','Age'])\nprint(s)\n\ndata = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'],'Age':[28,34,29,42]}\ndf = pd.DataFrame(data, index=['rank1','rank2','rank3','rank4'])\nprint (df)\n\ndata=[{'a':1,'b':2},{'a':3,'b':4,'c':5}]\ndf=pd.DataFrame(data)\nprint(df)\n\n\nd = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),\n 'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}\ndf = pd.DataFrame(d)\nprint (df)\n\n#....Adding New column......#\n\ndata={'one':pd.Series([1,2,3,4],index=[1,2,3,4]),\n\t'two':pd.Series([1,2,3],index=[1,2,3])}\ndf=pd.DataFrame(data)\nprint(df)\ndf['three']=pd.Series([1,2],index=[1,2])\nprint(df)\n\n#......Deleting a column......#\n\ndata={'one':pd.Series([1,2,3,4],index=[1,2,3,4]),\n\t'two':pd.Series([1,2,3],index=[1,2,3]),\n\t'three':pd.Series([1,1],index=[1,2])\n\t}\ndf=pd.DataFrame(data)\nprint(df)\ndel df['one']\nprint(df)\ndf.pop('two')\nprint(df)\t\n\n#......Selecting a particular Row............#\n\ndata={'one':pd.Series([1,2,3,4],index=[1,2,3,4]),\n\t'two':pd.Series([1,2,3],index=[1,2,3]),\n\t'three':pd.Series([1,1],index=[1,2])\n\t}\ndf=pd.DataFrame(data)\nprint(df.loc[2])\nprint(df[1:4])\t\n\n#.........Addition of Row.................#\n\ndf = pd.DataFrame([[1, 2], [3, 4]], columns = ['a','b'])\ndf2 = pd.DataFrame([[5, 6], [7, 8]], columns = ['a','b'])\n\ndf = df.append(df2)\nprint (df.head())\n\t\n\n#........Deleting a Row..................#\n\ndf = pd.DataFrame([[1, 2], [3, 4]], columns = ['a','b'])\ndf2 = pd.DataFrame([[5, 6], [7, 8]], columns = ['a','b'])\n\ndf = df.append(df2)\n\n# Drop rows with label 0\ndf = df.drop(0)\n\nprint (df)\n\n#..........................Functions.....................................#\n\n\n\nd = {'Name':pd.Series(['Tom','James','Ricky','Vin','Steve','Smith','Jack']),\n 'Age':pd.Series([25,26,25,23,30,29,23]),\n 'Rating':pd.Series([4.23,3.24,3.98,2.56,3.20,4.6,3.8])}\n\ndf = pd.DataFrame(d)\nprint (\"The transpose of the data series is:\")\nprint (df.T)\nprint(df.shape)\nprint(df.size)\nprint(df.values)\n\n#.........................Statistics.......................................#\n\nd = {'Name':pd.Series(['Tom','James','Ricky','Vin','Steve','Smith','Jack',\n 'Lee','David','Gasper','Betina','Andres']),\n 'Age':pd.Series([25,26,25,23,30,29,23,34,40,30,51,46]),\n 'Rating':pd.Series([4.23,3.24,3.98,2.56,3.20,4.6,3.8,3.78,2.98,4.80,4.10,3.65])\n}\ndf = pd.DataFrame(d)\nprint (df.sum())\n\n\nd = {'Name':pd.Series(['Tom','James','Ricky','Vin','Steve','Smith','Jack',\n 'Lee','David','Gasper','Betina','Andres']),\n 'Age':pd.Series([25,26,25,23,30,29,23,34,40,30,51,46]),\n 'Rating':pd.Series([4.23,3.24,3.98,2.56,3.20,4.6,3.8,3.78,2.98,4.80,4.10,3.65])\n}\ndf = pd.DataFrame(d)\nprint (df.describe(include='all'))\n\n\n#.......................Sorting..........................................#\n\n#Using the sort_index() method, by passing the axis arguments and the order of sorting,\n# DataFrame can be sorted. 
By default, sorting is done on row labels in ascending order.\n\nunsorted_df = pd.DataFrame(np.random.randn(10,2),index=[1,4,6,2,3,5,9,8,0,7],columns = ['col2','col1'])\n\nsorted_df=unsorted_df.sort_index()\nprint (sorted_df)\nsorted_df = unsorted_df.sort_index(ascending=False)\nprint (sorted_df)\n\n#By passing the axis argument with a value 0 or 1, \n#the sorting can be done on the column labels. By default, axis=0, sort by row. \n#Let us consider the following example to understand the same.\n\nunsorted_df = pd.DataFrame(np.random.randn(10,2),index=[1,4,6,2,3,5,9,8,0,7],columns = ['col2','col1'])\nsorted_df=unsorted_df.sort_index(axis=1)\nprint(sorted_df)\n\nunsorted_df = pd.DataFrame({'col1':[2,1,1,1],'col2':[1,3,2,4]})\nsorted_df = unsorted_df.sort_values(by='col1',kind='mergesort')\n\n# print (sorted_df)\n\n#...........................SLICING...............................#\n\ndf = pd.DataFrame(np.random.randn(8, 4),\nindex = ['a','b','c','d','e','f','g','h'], columns = ['A', 'B', 'C', 'D'])\n# Select all rows for multiple columns, say list[]\nprint (df.loc[:,['A','C']])\nprint (df.loc[['a','b','f','h'],['A','C']])\n\n\ndf = pd.DataFrame(np.random.randn(8, 4), columns = ['A', 'B', 'C', 'D'])\n# Index slicing\nprint(df.ix[:,'A'])\n\n#............................statistics......................#\n\ns = pd.Series([1,2,3,4,5,4])\nprint(s.pct_change())\n\ndf = pd.DataFrame(np.random.randn(5, 2))\nprint (df.pct_change())\n\ndf = pd.DataFrame(np.random.randn(10, 4),\n index = pd.date_range('1/1/2000', periods=10),\n columns = ['A', 'B', 'C', 'D'])\nprint(df.rolling(window=3).mean())\n\nprint (df.expanding(min_periods=3).mean())\n\n\n#........................MISSING DATA............................................#\n\ndf = pd.DataFrame(np.random.randn(3, 3), index=['a', 'c', 'e'],columns=['one',\n'two', 'three'])\n\ndf = df.reindex(['a', 'b', 'c'])\n\nprint(df)\nprint (\"NaN replaced with '0':\")\nprint(df.fillna(0))\n\n\ndf = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f',\n'h'],columns=['one', 'two', 'three'])\n\ndf = df.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\n\nprint(df)\nprint(df.fillna(method='pad'))\nprint(df.fillna(method='bfill'))\nprint(df.dropna())\nprint(df.dropna(axis=1))\n\n\n#.........................Grouping...............................................#\n\nipl_data = {'Team': ['Riders', 'Riders', 'Devils', 'Devils', 'Kings',\n 'kings', 'Kings', 'Kings', 'Riders', 'Royals', 'Royals', 'Riders'],\n 'Rank': [1, 2, 2, 3, 3,4 ,1 ,1,2 , 4,1,2],\n 'Year': [2014,2015,2014,2015,2014,2015,2016,2017,2016,2014,2015,2017],\n 'Points':[876,789,863,673,741,812,756,788,694,701,804,690]}\ndf = pd.DataFrame(ipl_data)\n\ngrouped = df.groupby('Year')\n\nfor name,group in grouped:\n print (name)\n print (group)\n\nprint (grouped.get_group(2014))\ngrouped = df.groupby('Team')\nprint (grouped['Points'].agg([np.sum, np.mean, np.std]))\n\n\n\n#...............................Reading a Csv File............................#\n\ndata=pd.read_csv(\"dat.csv\")\nprint(data)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.Series", "pandas.DataFrame", "numpy.random.randn", "pandas.date_range", "numpy.array" ] ]
knitemblazor/MaskRCNN_pytorch_1.8_non_detectron
[ "2956d1d831ee0c518bd5cf588a952cd9c91a74c1" ]
[ "tests/crop_and_resize_example.py" ]
[ "import torch\nfrom torch import nn\nfrom torchvision import transforms, utils\nfrom torch.autograd import Variable, gradcheck\nfrom roi_align.crop_and_resize import CropAndResizeFunction\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\n\n\ndef to_varabile(tensor, requires_grad=False, is_cuda=True):\n if is_cuda:\n tensor = tensor.cuda()\n var = Variable(tensor, requires_grad=requires_grad)\n return var\n\n\ncrop_height = 500\ncrop_width = 500\nis_cuda = torch.cuda.is_available()\n\n# In this simple example the number of images and boxes is 2\nimg_path1 = 'tests/images/choco.png'\nimg_path2 = 'tests/images/snow.png'\n\n# Define the boxes ( crops )\n# box = [y1/heigth , x1/width , y2/heigth , x2/width]\nboxes_data = torch.FloatTensor([[0, 0, 1, 1], [0, 0, 0.5, 0.5]])\n\n# Create an index to say which box crops which image\nbox_index_data = torch.IntTensor([0, 1])\n\n# Import the images from file\nimage_data1 = transforms.ToTensor()(imread(img_path1)).unsqueeze(0)\nimage_data2 = transforms.ToTensor()(imread(img_path2)).unsqueeze(0)\n\n# Create a batch of 2 images\nimage_data = torch.cat((image_data1, image_data2), 0)\n\n# Convert from numpy to Variables\nimage_torch = to_varabile(image_data, is_cuda=is_cuda)\nboxes = to_varabile(boxes_data, is_cuda=is_cuda)\nbox_index = to_varabile(box_index_data, is_cuda=is_cuda)\n\n# Crops and resize bbox1 from img1 and bbox2 from img2\ncrops_torch = CropAndResizeFunction.apply(image_torch, boxes, box_index, crop_height, crop_width, 0)\n\n# Visualize the crops\nprint(crops_torch.data.size())\ncrops_torch_data = crops_torch.data.cpu().numpy().transpose(0, 2, 3, 1)\nfig = plt.figure()\nplt.subplot(121)\nplt.imshow(crops_torch_data[0])\nplt.subplot(122)\nplt.imshow(crops_torch_data[1])\nplt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "torch.cat", "torch.autograd.Variable", "matplotlib.pyplot.subplot", "torch.FloatTensor", "torch.cuda.is_available", "torch.IntTensor", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
mkoculak/mne-python
[ "c6291eb1b9bc943e7e294b3147e4f5aafd82cbd8" ]
[ "mne/viz/tests/test_evoked.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n# Eric Larson <[email protected]>\n# Cathy Nangini <[email protected]>\n# Mainak Jas <[email protected]>\n# Jona Sassenhagen <[email protected]>\n# Daniel McCloy <[email protected]>\n#\n# License: Simplified BSD\n\nimport os.path as op\n\nimport numpy as np\nimport pytest\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom matplotlib.cm import get_cmap\n\nimport mne\nfrom mne import (read_events, Epochs, read_cov, compute_covariance,\n make_fixed_length_events)\nfrom mne.io import read_raw_fif\nfrom mne.utils import run_tests_if_main, catch_logging\nfrom mne.viz import plot_compare_evokeds, plot_evoked_white\nfrom mne.viz.utils import _fake_click\nfrom mne.datasets import testing\nfrom mne.io.constants import FIFF\nfrom mne.stats.parametric import _parametric_ci\n\nbase_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nevoked_fname = op.join(base_dir, 'test-ave.fif')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nraw_sss_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')\ncov_fname = op.join(base_dir, 'test-cov.fif')\nevent_name = op.join(base_dir, 'test-eve.fif')\nevent_id, tmin, tmax = 1, -0.1, 0.1\n\n# Use a subset of channels for plotting speed\n# make sure we have a magnetometer and a pair of grad pairs for topomap.\npicks = [0, 1, 2, 3, 4, 6, 7, 61, 122, 183, 244, 305]\nsel = [0, 7]\n\n\ndef _get_epochs(picks=picks):\n \"\"\"Get epochs.\"\"\"\n raw = read_raw_fif(raw_fname)\n raw.add_proj([], remove_existing=True)\n events = read_events(event_name)\n epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,\n decim=10, verbose='error')\n epochs.info['bads'] = [epochs.ch_names[-1]]\n epochs.info.normalize_proj()\n return epochs\n\n\ndef _get_epochs_delayed_ssp():\n \"\"\"Get epochs with delayed SSP.\"\"\"\n raw = read_raw_fif(raw_fname)\n events = read_events(event_name)\n reject = dict(mag=4e-12)\n epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,\n picks=picks, proj='delayed', reject=reject,\n verbose='error')\n epochs_delayed_ssp.info.normalize_proj()\n return epochs_delayed_ssp\n\n\ndef test_plot_evoked_cov():\n \"\"\"Test plot_evoked with noise_cov.\"\"\"\n evoked = _get_epochs().average()\n cov = read_cov(cov_fname)\n cov['projs'] = [] # avoid warnings\n evoked.plot(noise_cov=cov, time_unit='s')\n with pytest.raises(TypeError, match='Covariance'):\n evoked.plot(noise_cov=1., time_unit='s')\n with pytest.raises(IOError, match='No such file'):\n evoked.plot(noise_cov='nonexistent-cov.fif', time_unit='s')\n raw = read_raw_fif(raw_sss_fname)\n events = make_fixed_length_events(raw)\n epochs = Epochs(raw, events, picks=picks)\n cov = compute_covariance(epochs)\n evoked_sss = epochs.average()\n with pytest.warns(RuntimeWarning, match='relative scaling'):\n evoked_sss.plot(noise_cov=cov, time_unit='s')\n plt.close('all')\n\n\[email protected]\ndef test_plot_evoked():\n \"\"\"Test evoked.plot.\"\"\"\n evoked = _get_epochs().average()\n fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo',\n time_unit='s')\n # Test a click\n ax = fig.get_axes()[0]\n line = ax.lines[0]\n _fake_click(fig, ax,\n [line.get_xdata()[0], line.get_ydata()[0]], 'data')\n _fake_click(fig, ax,\n [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')\n # plot with bad channels excluded & spatial_colors & zorder\n evoked.plot(exclude='bads', time_unit='s')\n\n # test selective updating of dict keys is working.\n 
evoked.plot(hline=[1], units=dict(mag='femto foo'), time_unit='s')\n evoked_delayed_ssp = _get_epochs_delayed_ssp().average()\n evoked_delayed_ssp.plot(proj='interactive', time_unit='s')\n evoked_delayed_ssp.apply_proj()\n pytest.raises(RuntimeError, evoked_delayed_ssp.plot,\n proj='interactive', time_unit='s')\n evoked_delayed_ssp.info['projs'] = []\n pytest.raises(RuntimeError, evoked_delayed_ssp.plot,\n proj='interactive', time_unit='s')\n pytest.raises(RuntimeError, evoked_delayed_ssp.plot,\n proj='interactive', axes='foo', time_unit='s')\n plt.close('all')\n\n # test GFP only\n evoked.plot(gfp='only', time_unit='s')\n pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s')\n\n # plot with bad channels excluded, spatial_colors, zorder & pos. layout\n evoked.rename_channels({'MEG 0133': 'MEG 0000'})\n evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,\n zorder='std', time_unit='s')\n evoked.plot(exclude=[], spatial_colors=True, zorder='unsorted',\n time_unit='s')\n pytest.raises(TypeError, evoked.plot, zorder='asdf', time_unit='s')\n plt.close('all')\n\n evoked.plot_sensors() # Test plot_sensors\n plt.close('all')\n\n evoked.pick_channels(evoked.ch_names[:4])\n with catch_logging() as log_file:\n evoked.plot(verbose=True, time_unit='s')\n assert 'Need more than one' in log_file.getvalue()\n\n\ndef test_plot_evoked_image():\n \"\"\"Test plot_evoked_image.\"\"\"\n evoked = _get_epochs().average()\n evoked.plot_image(proj=True, time_unit='ms')\n\n # fail nicely on NaN\n evoked_nan = evoked.copy()\n evoked_nan.data[:, 0] = np.nan\n pytest.raises(ValueError, evoked_nan.plot)\n with np.errstate(invalid='ignore'):\n pytest.raises(ValueError, evoked_nan.plot_image)\n pytest.raises(ValueError, evoked_nan.plot_joint)\n\n # test mask\n evoked.plot_image(picks=[1, 2], mask=evoked.data > 0, time_unit='s')\n evoked.plot_image(picks=[1, 2], mask_cmap=None, colorbar=False,\n mask=np.ones(evoked.data.shape).astype(bool),\n time_unit='s')\n with pytest.warns(RuntimeWarning, match='not adding contour'):\n evoked.plot_image(picks=[1, 2], mask=None, mask_style=\"both\",\n time_unit='s')\n with pytest.raises(ValueError, match='must have the same shape'):\n evoked.plot_image(mask=evoked.data[1:, 1:] > 0, time_unit='s')\n\n # plot with bad channels excluded\n evoked.plot_image(exclude='bads', cmap='interactive', time_unit='s')\n plt.close('all')\n\n with pytest.raises(ValueError, match='not unique'):\n evoked.plot_image(picks=[0, 0], time_unit='s') # duplicates\n\n ch_names = evoked.ch_names[3:5]\n picks = [evoked.ch_names.index(ch) for ch in ch_names]\n evoked.plot_image(show_names=\"all\", time_unit='s', picks=picks)\n yticklabels = plt.gca().get_yticklabels()\n for tick_target, tick_observed in zip(ch_names, yticklabels):\n assert tick_target in str(tick_observed)\n evoked.plot_image(show_names=True, time_unit='s')\n\n # test groupby\n evoked.plot_image(group_by=dict(sel=sel), axes=dict(sel=plt.axes()))\n plt.close('all')\n for group_by, axes in ((\"something\", dict()), (dict(), \"something\")):\n pytest.raises(ValueError, evoked.plot_image, group_by=group_by,\n axes=axes)\n\n\ndef test_plot_white():\n \"\"\"Test plot_white.\"\"\"\n cov = read_cov(cov_fname)\n cov['method'] = 'empirical'\n cov['projs'] = [] # avoid warnings\n evoked = _get_epochs().average()\n # test rank param.\n evoked.plot_white(cov, rank={'mag': 101, 'grad': 201}, time_unit='s')\n fig = evoked.plot_white(cov, rank={'mag': 101}, time_unit='s') # test rank\n evoked.plot_white(cov, rank={'grad': 201}, 
time_unit='s', axes=fig.axes)\n with pytest.raises(ValueError, match=r'must have shape \\(3,\\), got \\(2,'):\n evoked.plot_white(cov, axes=fig.axes[:2])\n with pytest.raises(ValueError, match='When not using SSS'):\n evoked.plot_white(cov, rank={'meg': 306})\n evoked.plot_white([cov, cov], time_unit='s')\n plt.close('all')\n\n assert 'eeg' not in evoked\n fig = plot_evoked_white(evoked, [cov, cov])\n assert len(fig.axes) == 2 * 2\n axes = np.array(fig.axes).reshape(2, 2)\n plot_evoked_white(evoked, [cov, cov], axes=axes)\n with pytest.raises(ValueError, match=r'have shape \\(2, 2\\), got'):\n plot_evoked_white(evoked, [cov, cov], axes=axes[:, :1])\n\n # Hack to test plotting of maxfiltered data\n evoked_sss = evoked.copy()\n sss = dict(sss_info=dict(in_order=80, components=np.arange(80)))\n evoked_sss.info['proc_history'] = [dict(max_info=sss)]\n evoked_sss.plot_white(cov, rank={'meg': 64})\n with pytest.raises(ValueError, match='When using SSS'):\n evoked_sss.plot_white(cov, rank={'grad': 201})\n evoked_sss.plot_white(cov, time_unit='s')\n plt.close('all')\n\n\ndef test_plot_compare_evokeds():\n \"\"\"Test plot_compare_evokeds.\"\"\"\n evoked = _get_epochs().average()\n # test defaults\n figs = plot_compare_evokeds(evoked)\n assert len(figs) == 2\n # test picks, combine, and vlines (1-channel pick also shows sensor inset)\n picks = ['MEG 0113', 'mag'] + 2 * [['MEG 0113', 'MEG 0112']] + [[0, 1]]\n vlines = [[0.1, 0.2], []] + 3 * ['auto']\n combine = [None, 'mean', 'std', None, lambda x: np.min(x, axis=1)]\n title = ['MEG 0113', '(mean)', '(std. dev.)', '(GFP)', 'MEG 0112']\n for _p, _v, _c, _t in zip(picks, vlines, combine, title):\n fig = plot_compare_evokeds(evoked, picks=_p, vlines=_v, combine=_c)\n assert fig[0].axes[0].get_title().endswith(_t)\n # test passing more than one evoked\n red, blue = evoked.copy(), evoked.copy()\n red.data *= 1.5\n blue.data /= 1.5\n evoked_dict = {'aud/l': blue, 'aud/r': red, 'vis': evoked}\n huge_dict = {'cond{}'.format(i): ev for i, ev in enumerate([evoked] * 11)}\n plot_compare_evokeds(evoked_dict) # dict\n plot_compare_evokeds([[red, evoked], [blue, evoked]]) # list of lists\n figs = plot_compare_evokeds({'cond': [blue, red, evoked]}) # dict of list\n # test that confidence bands are plausible\n for fig in figs:\n extents = fig.axes[0].collections[0].get_paths()[0].get_extents()\n xlim, ylim = extents.get_points().T\n assert np.allclose(xlim, evoked.times[[0, -1]])\n line = fig.axes[0].lines[0]\n xvals = line.get_xdata()\n assert np.allclose(xvals, evoked.times)\n yvals = line.get_ydata()\n assert (yvals < ylim[1]).all()\n assert (yvals > ylim[0]).all()\n plt.close('all')\n # test other CI args\n for _ci in (None, False, 0.5,\n lambda x: np.stack([x.mean(axis=0) + 1, x.mean(axis=0) - 1])):\n plot_compare_evokeds({'cond': [blue, red, evoked]}, ci=_ci)\n with pytest.raises(TypeError, match='\"ci\" must be None, bool, float or'):\n plot_compare_evokeds(evoked, ci='foo')\n # test sensor inset, legend location, and axis inversion & truncation\n plot_compare_evokeds(evoked_dict, invert_y=True, legend='upper left',\n show_sensors='center', truncate_xaxis=False,\n truncate_yaxis=False)\n plot_compare_evokeds(evoked, ylim=dict(mag=(-50, 50)), truncate_yaxis=True)\n plt.close('all')\n # test styles\n plot_compare_evokeds(evoked_dict, colors=['b', 'r', 'g'],\n linestyles=[':', '-', '--'], split_legend=True)\n style_dict = dict(aud=dict(alpha=0.3), vis=dict(linewidth=3, c='k'))\n plot_compare_evokeds(evoked_dict, styles=style_dict, colors={'aud/r': 'r'},\n 
linestyles=dict(vis='dotted'), ci=False)\n plot_compare_evokeds(evoked_dict, colors=list(range(3)))\n plt.close('all')\n # test colormap\n cmap = get_cmap('viridis')\n plot_compare_evokeds(evoked_dict, cmap=cmap, colors=dict(aud=0.4, vis=0.9))\n plot_compare_evokeds(evoked_dict, cmap=cmap, colors=dict(aud=1, vis=2))\n plot_compare_evokeds(evoked_dict, cmap=('cmap title', 'inferno'),\n linestyles=['-', ':', '--'])\n plt.close('all')\n # test warnings\n with pytest.warns(RuntimeWarning, match='in \"picks\"; cannot combine'):\n plot_compare_evokeds(evoked, picks=[0], combine='median')\n plt.close('all')\n # test errors\n with pytest.raises(TypeError, match='\"evokeds\" must be a dict, list'):\n plot_compare_evokeds('foo')\n with pytest.raises(ValueError, match=r'keys in \"styles\" \\(.*\\) must '):\n plot_compare_evokeds(evoked_dict, styles=dict(foo='foo', bar='bar'))\n with pytest.raises(ValueError, match='colors in the default color cycle'):\n plot_compare_evokeds(huge_dict, colors=None)\n with pytest.raises(TypeError, match='\"cmap\" is specified, then \"colors\"'):\n plot_compare_evokeds(evoked_dict, cmap='Reds', colors={'aud/l': 'foo',\n 'aud/r': 'bar',\n 'vis': 'baz'})\n plt.close('all')\n for kwargs in [dict(colors=[0, 1]), dict(linestyles=['-', ':'])]:\n match = r'but there are only \\d* (colors|linestyles). Please specify'\n with pytest.raises(ValueError, match=match):\n plot_compare_evokeds(evoked_dict, **kwargs)\n for kwargs in [dict(colors='foo'), dict(linestyles='foo')]:\n match = r'\"(colors|linestyles)\" must be a dict, list, or None; got '\n with pytest.raises(TypeError, match=match):\n plot_compare_evokeds(evoked_dict, **kwargs)\n for kwargs in [dict(colors=dict(foo='f')), dict(linestyles=dict(foo='f'))]:\n match = r'If \"(colors|linestyles)\" is a dict its keys \\(.*\\) must '\n with pytest.raises(ValueError, match=match):\n plot_compare_evokeds(evoked_dict, **kwargs)\n for kwargs in [dict(legend='foo'), dict(show_sensors='foo')]:\n with pytest.raises(ValueError, match='not a legal MPL loc, please'):\n plot_compare_evokeds(evoked_dict, **kwargs)\n with pytest.raises(TypeError, match='an instance of list or tuple'):\n plot_compare_evokeds(evoked_dict, vlines='foo')\n with pytest.raises(ValueError, match='\"truncate_yaxis\" must be bool or '):\n plot_compare_evokeds(evoked_dict, truncate_yaxis='foo')\n plt.close('all')\n # test axes='topo'\n figs = plot_compare_evokeds(evoked_dict, axes='topo', legend=True)\n for fig in figs:\n assert len(fig.axes[0].lines) == len(evoked_dict)\n # test with (fake) CSD data\n csd = _get_epochs(picks=np.arange(315, 320)).average() # 5 EEG chs\n for entry in csd.info['chs']:\n entry['coil_type'] = FIFF.FIFFV_COIL_EEG_CSD\n entry['unit'] = FIFF.FIFF_UNIT_V_M2\n plot_compare_evokeds(csd, picks='csd', axes='topo')\n # old tests\n red.info['chs'][0]['loc'][:2] = 0 # test plotting channel at zero\n plot_compare_evokeds([red, blue], picks=[0],\n ci=lambda x: [x.std(axis=0), -x.std(axis=0)])\n plot_compare_evokeds([list(evoked_dict.values())], picks=[0],\n ci=_parametric_ci)\n # smoke test for tmin >= 0 (from mailing list)\n red.crop(0.01, None)\n assert len(red.times) > 2\n plot_compare_evokeds(red)\n # plot a flat channel\n red.data = np.zeros_like(red.data)\n plot_compare_evokeds(red)\n # smoke test for one time point (not useful but should not fail)\n red.crop(0.02, 0.02)\n assert len(red.times) == 1\n plot_compare_evokeds(red)\n # now that we've cropped `red`:\n with pytest.raises(ValueError, match='not contain the same time instants'):\n 
plot_compare_evokeds(evoked_dict)\n plt.close('all')\n\n\ndef test_plot_compare_evokeds_neuromag122():\n \"\"\"Test topomap plotting.\"\"\"\n evoked = mne.read_evokeds(evoked_fname, 'Left Auditory',\n baseline=(None, 0))\n evoked.pick_types(meg='grad')\n evoked.pick_channels(evoked.ch_names[:122])\n ch_names = ['MEG %03d' % k for k in range(1, 123)]\n for c in evoked.info['chs']:\n c['coil_type'] = FIFF.FIFFV_COIL_NM_122\n evoked.rename_channels({c_old: c_new for (c_old, c_new) in\n zip(evoked.ch_names, ch_names)})\n mne.viz.plot_compare_evokeds([evoked, evoked])\n\n\[email protected]_testing_data\ndef test_plot_ctf():\n \"\"\"Test plotting of CTF evoked.\"\"\"\n ctf_dir = op.join(testing.data_path(download=False), 'CTF')\n raw_fname = op.join(ctf_dir, 'testdata_ctf.ds')\n\n raw = mne.io.read_raw_ctf(raw_fname, preload=True)\n events = np.array([[200, 0, 1]])\n event_id = 1\n tmin, tmax = -0.1, 0.5 # start and end of an epoch in sec.\n picks = mne.pick_types(raw.info, meg=True, stim=True, eog=True,\n ref_meg=True, exclude='bads')[::20]\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n picks=picks, preload=True, decim=10, verbose='error')\n evoked = epochs.average()\n evoked.plot_joint(times=[0.1])\n mne.viz.plot_compare_evokeds([evoked, evoked])\n\n # make sure axes position is \"almost\" unchanged\n # when axes were passed to plot_joint by the user\n times = [0.1, 0.2, 0.3]\n fig = plt.figure()\n\n # create custom axes for topomaps, colorbar and the timeseries\n gs = gridspec.GridSpec(3, 7, hspace=0.5, top=0.8)\n topo_axes = [fig.add_subplot(gs[0, idx * 2:(idx + 1) * 2])\n for idx in range(len(times))]\n topo_axes.append(fig.add_subplot(gs[0, -1]))\n ts_axis = fig.add_subplot(gs[1:, 1:-1])\n\n def get_axes_midpoints(axes):\n midpoints = list()\n for ax in axes[:-1]:\n pos = ax.get_position()\n midpoints.append([pos.x0 + (pos.width * 0.5),\n pos.y0 + (pos.height * 0.5)])\n return np.array(midpoints)\n\n midpoints_before = get_axes_midpoints(topo_axes)\n evoked.plot_joint(times=times, ts_args={'axes': ts_axis},\n topomap_args={'axes': topo_axes}, title=None)\n midpoints_after = get_axes_midpoints(topo_axes)\n assert (np.linalg.norm(midpoints_before - midpoints_after) < 0.1).all()\n\n\nrun_tests_if_main()\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.allclose", "numpy.min", "numpy.arange", "numpy.linalg.norm", "numpy.ones", "matplotlib.pyplot.axes", "numpy.zeros_like", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.cm.get_cmap", "numpy.errstate", "numpy.array", "matplotlib.pyplot.figure" ] ]
roachsinai/CVND---Image-Captioning-Project
[ "fa885ed601d0e106b98b081959dae25d196846aa" ]
[ "data_loader.py" ]
[ "import nltk\nimport os\nimport torch\nimport torch.utils.data as data\nfrom vocabulary import Vocabulary\nfrom PIL import Image\nfrom pycocotools.coco import COCO\nimport numpy as np\nfrom tqdm import tqdm\nimport random\nimport json\n\ndef get_loader(transform,\n mode='train',\n batch_size=1,\n vocab_threshold=None,\n vocab_file='./vocab.pkl',\n start_word=\"<start>\",\n end_word=\"<end>\",\n unk_word=\"<unk>\",\n vocab_from_file=True,\n num_workers=0,\n coco_loc='/home/roach/Pictures/cv/coco'):\n \"\"\"Returns the data loader.\n Args:\n transform: Image transform.\n mode: One of 'train' or 'test'.\n batch_size: Batch size (if in testing mode, must have batch_size=1).\n vocab_threshold: Minimum word count threshold, 在所有的caption中出现次数超过threshold才加入词汇表.\n vocab_file: File containing the vocabulary.\n start_word: Special word denoting sentence start.\n end_word: Special word denoting sentence end.\n unk_word: Special word denoting unknown words.\n vocab_from_file: If False, create vocab from scratch & override any existing vocab_file.\n If True, load vocab from from existing vocab_file, if it exists.\n num_workers: Number of subprocesses to use for data loading\n coco_loc: The location of the folder containing the COCODataSet.\n \"\"\"\n\n assert mode in ['train', 'test'], \"mode must be one of 'train' or 'test'.\"\n if vocab_from_file==False: assert mode=='train', \"To generate vocab from captions file, must be in training mode (mode='train').\"\n\n # Based on mode (train, val, test), obtain img_folder and annotations_file.\n if mode == 'train':\n if vocab_from_file==True: assert os.path.exists(vocab_file), \"vocab_file does not exist. Change vocab_from_file to False to create vocab_file.\"\n img_folder = os.path.join(coco_loc, 'images/train2014/')\n annotations_file = os.path.join(coco_loc, 'annotations/captions_train2014.json')\n if mode == 'test':\n assert batch_size==1, \"Please change batch_size to 1 if testing your model.\"\n assert os.path.exists(vocab_file), \"Must first generate vocab.pkl from training data.\"\n assert vocab_from_file==True, \"Change vocab_from_file to True.\"\n img_folder = os.path.join(coco_loc, 'images/test2014/')\n annotations_file = os.path.join(coco_loc, 'annotations/image_info_test2014.json')\n\n # COCO caption dataset.\n dataset = CoCoDataset(transform=transform,\n mode=mode,\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_file=vocab_file,\n start_word=start_word,\n end_word=end_word,\n unk_word=unk_word,\n annotations_file=annotations_file,\n vocab_from_file=vocab_from_file,\n img_folder=img_folder)\n\n if mode == 'train':\n # Randomly sample a caption length, and sample indices with that length.\n indices = dataset.get_train_indices()\n # Create and assign a batch sampler to retrieve a batch with the sampled indices.\n initial_sampler = data.sampler.SubsetRandomSampler(indices=indices)\n # data loader for COCO dataset.\n data_loader = data.DataLoader(dataset=dataset,\n num_workers=num_workers,\n batch_sampler=data.sampler.BatchSampler(sampler=initial_sampler,\n batch_size=dataset.batch_size,\n drop_last=False))\n else:\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=dataset.batch_size,\n shuffle=True,\n num_workers=num_workers)\n\n return data_loader\n\nclass CoCoDataset(data.Dataset):\n\n def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n self.transform = transform\n self.mode = mode\n self.batch_size = 
batch_size\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n self.img_folder = img_folder\n if self.mode == 'train':\n self.coco = COCO(annotations_file)\n self.ids = list(self.coco.anns.keys())\n print('Obtaining caption lengths...')\n all_tokens = [nltk.tokenize.word_tokenize(str(self.coco.anns[self.ids[index]]['caption']).lower()) for index in tqdm(np.arange(len(self.ids)))]\n self.caption_lengths = [len(token) for token in all_tokens]\n else:\n test_info = json.loads(open(annotations_file).read())\n self.paths = [item['file_name'] for item in test_info['images']]\n\n def __getitem__(self, index):\n # obtain image and caption if in training mode\n if self.mode == 'train':\n ann_id = self.ids[index]\n caption = self.coco.anns[ann_id]['caption']\n img_id = self.coco.anns[ann_id]['image_id']\n path = self.coco.loadImgs(img_id)[0]['file_name']\n\n # Convert image to tensor and pre-process using transform\n image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\n image = self.transform(image)\n\n # Convert caption to tensor of word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(self.vocab(self.vocab.start_word))\n caption.extend([self.vocab(token) for token in tokens])\n caption.append(self.vocab(self.vocab.end_word))\n caption = torch.Tensor(caption).long()\n\n # return pre-processed image and caption tensors\n return image, caption\n\n # obtain image if in test mode\n else:\n path = self.paths[index]\n\n # Convert image to tensor and pre-process using transform\n PIL_image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\n orig_image = np.array(PIL_image)\n image = self.transform(PIL_image)\n\n # return original image and pre-processed image tensor\n return orig_image, image\n\n def get_train_indices(self):\n sel_length = np.random.choice(self.caption_lengths)\n all_indices = np.where([self.caption_lengths[i] == sel_length for i in np.arange(len(self.caption_lengths))])[0]\n indices = list(np.random.choice(all_indices, size=self.batch_size))\n return indices\n\n def __len__(self):\n if self.mode == 'train':\n return len(self.ids)\n else:\n return len(self.paths)\n" ]
[ [ "torch.Tensor", "numpy.random.choice", "torch.utils.data.DataLoader", "torch.utils.data.sampler.SubsetRandomSampler", "numpy.array", "torch.utils.data.sampler.BatchSampler" ] ]
agonzale34/sdc-advanced-lane-detection
[ "10bb208deabfb5a654418d3f4d5404a7c57363af" ]
[ "src/models/line.py" ]
[ "import numpy as np\n\nfrom src.utils.params import *\n\n\n# Define a class to receive the characteristics of each line detection\nclass Line:\n\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_x_fitted = []\n # average x values of the fitted line over the last n iterations\n self.best_x = None\n # polynomial coefficients averaged over the last n iterations\n self.recent_fit = []\n self.best_fit = None\n # polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n # radius of curvature of the line in some units\n self.radius_of_curvature = None\n self.recent_radius = []\n self.best_radius = 0\n # distance in meters of vehicle center from the line\n self.line_base_pos = None\n self.recent_pos = []\n self.best_pos = 0\n # difference in fit coefficients between last and new fits\n self.diffs = np.array([0, 0, 0], dtype='float')\n # x values for detected line pixels\n self.allx = None\n # y values for detected line pixels\n self.ally = None\n\n def append_x_fitted(self, x_fitted):\n self.recent_x_fitted.append(x_fitted)\n self.allx = x_fitted\n\n if len(self.recent_x_fitted) > N_LINES:\n self.recent_x_fitted.pop(0)\n\n if len(self.recent_x_fitted) > 1:\n self.best_x = np.mean(self.recent_x_fitted, axis=0)\n else:\n self.best_x = x_fitted\n\n def append_fit(self, fit):\n self.recent_fit.append(fit)\n self.current_fit = fit\n\n if len(self.recent_fit) > N_LINES:\n self.recent_fit.pop(0)\n\n if len(self.recent_fit) > 1:\n self.best_fit = np.mean(self.recent_fit, axis=0)\n else:\n self.best_fit = fit\n\n def append_pos(self, pos):\n self.recent_pos.append(pos)\n self.line_base_pos = pos\n\n if len(self.recent_pos) > N_LINES:\n self.recent_pos.pop(0)\n\n if len(self.recent_pos) > 1:\n self.best_pos = np.average(self.line_base_pos)\n else:\n self.best_pos = pos\n\n def calculate_curvature(self):\n # Calculate the polynomial in real meters\n y_max = np.argmax(self.ally) * YM_PER_PIX\n fit_cr = np.polyfit(self.ally * YM_PER_PIX, self.best_x * XM_PER_PIX, 2)\n self.radius_of_curvature = ((1 + (2 * fit_cr[0] * y_max + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])\n self.recent_radius.append(self.radius_of_curvature)\n\n if len(self.recent_radius) > N_LINES:\n self.recent_radius.pop(0)\n\n if len(self.recent_radius) > 1:\n self.best_radius = np.average(self.recent_radius)\n else:\n self.best_radius = self.radius_of_curvature\n\n def check_sanity(self):\n if (abs(self.radius_of_curvature - self.best_radius) < 150) & (abs(self.line_base_pos - self.best_pos) < 1):\n self.detected = True\n else:\n self.recent_radius.pop()\n self.recent_pos.pop()\n self.recent_fit.pop()\n self.recent_x_fitted.pop()\n self.detected = False\n" ]
[ [ "numpy.polyfit", "numpy.absolute", "numpy.argmax", "numpy.mean", "numpy.average", "numpy.array" ] ]
ljjcoder/CSEI
[ "1dec6205c234e229629bdc2e88ccc3350f26f620" ]
[ "torchFewShot/models/resnet12.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, kernel=3, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n if kernel == 1:\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n elif kernel == 3:\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n if kernel == 1:\n self.conv3 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)\n elif kernel == 3:\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, kernel=1, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, kernel=3):\n self.inplanes = 64\n self.kernel = kernel\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n\n self.layer1 = self._make_layer(block, 64, layers[0], stride=2) \n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n self.nFeat = 512 * block.expansion\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, self.kernel, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, self.kernel))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n\ndef resnet12():\n model = ResNet(BasicBlock, [1,1,1,1], kernel=3)\n return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d" ] ]
GinkoBalboa/xgboost
[ "29bfa94bb6a99721f3ab9e0c4f05ee2df4345294" ]
[ "tests/python/test_callback.py" ]
[ "from typing import Union\nimport xgboost as xgb\nimport pytest\nimport os\nimport testing as tm\nimport tempfile\n\n# We use the dataset for tests.\npytestmark = pytest.mark.skipif(**tm.no_sklearn())\n\n\nclass TestCallbacks:\n @classmethod\n def setup_class(cls):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n cls.X = X\n cls.y = y\n\n split = int(X.shape[0]*0.8)\n cls.X_train = X[: split, ...]\n cls.y_train = y[: split, ...]\n cls.X_valid = X[split:, ...]\n cls.y_valid = y[split:, ...]\n\n def run_evaluation_monitor(\n self,\n D_train: xgb.DMatrix,\n D_valid: xgb.DMatrix,\n rounds: int,\n verbose_eval: Union[bool, int]\n ):\n def check_output(output: str) -> None:\n if int(verbose_eval) == 1:\n # Should print each iteration info\n assert len(output.split('\\n')) == rounds\n elif int(verbose_eval) > rounds:\n # Should print first and latest iteration info\n assert len(output.split('\\n')) == 2\n else:\n # Should print info by each period additionaly to first and latest\n # iteration\n num_periods = rounds // int(verbose_eval)\n # Extra information is required for latest iteration\n is_extra_info_required = num_periods * int(verbose_eval) < (rounds - 1)\n assert len(output.split('\\n')) == (\n 1 + num_periods + int(is_extra_info_required)\n )\n\n evals_result: xgb.callback.TrainingCallback.EvalsLog = {}\n params = {'objective': 'binary:logistic', 'eval_metric': 'error'}\n with tm.captured_output() as (out, err):\n xgb.train(\n params, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n num_boost_round=rounds,\n evals_result=evals_result,\n verbose_eval=verbose_eval,\n )\n output: str = out.getvalue().strip()\n check_output(output)\n\n with tm.captured_output() as (out, err):\n xgb.cv(params, D_train, num_boost_round=rounds, verbose_eval=verbose_eval)\n output = out.getvalue().strip()\n check_output(output)\n\n def test_evaluation_monitor(self):\n D_train = xgb.DMatrix(self.X_train, self.y_train)\n D_valid = xgb.DMatrix(self.X_valid, self.y_valid)\n evals_result = {}\n rounds = 10\n xgb.train({'objective': 'binary:logistic',\n 'eval_metric': 'error'}, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n num_boost_round=rounds,\n evals_result=evals_result,\n verbose_eval=True)\n assert len(evals_result['Train']['error']) == rounds\n assert len(evals_result['Valid']['error']) == rounds\n\n self.run_evaluation_monitor(D_train, D_valid, rounds, True)\n self.run_evaluation_monitor(D_train, D_valid, rounds, 2)\n self.run_evaluation_monitor(D_train, D_valid, rounds, 4)\n self.run_evaluation_monitor(D_train, D_valid, rounds, rounds + 1)\n\n def test_early_stopping(self):\n D_train = xgb.DMatrix(self.X_train, self.y_train)\n D_valid = xgb.DMatrix(self.X_valid, self.y_valid)\n evals_result = {}\n rounds = 30\n early_stopping_rounds = 5\n booster = xgb.train({'objective': 'binary:logistic',\n 'eval_metric': 'error'}, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n num_boost_round=rounds,\n evals_result=evals_result,\n verbose_eval=True,\n early_stopping_rounds=early_stopping_rounds)\n dump = booster.get_dump(dump_format='json')\n assert len(dump) - booster.best_iteration == early_stopping_rounds + 1\n\n # No early stopping, best_iteration should be set to last epoch\n booster = xgb.train({'objective': 'binary:logistic',\n 'eval_metric': 'error'}, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n num_boost_round=10,\n evals_result=evals_result,\n verbose_eval=True)\n assert booster.num_boosted_rounds() - 1 == 
booster.best_iteration\n\n def test_early_stopping_custom_eval(self):\n D_train = xgb.DMatrix(self.X_train, self.y_train)\n D_valid = xgb.DMatrix(self.X_valid, self.y_valid)\n early_stopping_rounds = 5\n booster = xgb.train({'objective': 'binary:logistic',\n 'eval_metric': 'error',\n 'tree_method': 'hist'}, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n feval=tm.eval_error_metric,\n num_boost_round=1000,\n early_stopping_rounds=early_stopping_rounds,\n verbose_eval=False)\n dump = booster.get_dump(dump_format='json')\n assert len(dump) - booster.best_iteration == early_stopping_rounds + 1\n\n def test_early_stopping_customize(self):\n D_train = xgb.DMatrix(self.X_train, self.y_train)\n D_valid = xgb.DMatrix(self.X_valid, self.y_valid)\n early_stopping_rounds = 5\n early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds,\n metric_name='CustomErr',\n data_name='Train')\n # Specify which dataset and which metric should be used for early stopping.\n booster = xgb.train(\n {'objective': 'binary:logistic',\n 'eval_metric': ['error', 'rmse'],\n 'tree_method': 'hist'}, D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n feval=tm.eval_error_metric,\n num_boost_round=1000,\n callbacks=[early_stop],\n verbose_eval=False)\n dump = booster.get_dump(dump_format='json')\n assert len(dump) - booster.best_iteration == early_stopping_rounds + 1\n assert len(early_stop.stopping_history['Train']['CustomErr']) == len(dump)\n\n rounds = 100\n early_stop = xgb.callback.EarlyStopping(\n rounds=early_stopping_rounds,\n metric_name='CustomErr',\n data_name='Train',\n min_delta=100,\n save_best=True,\n )\n booster = xgb.train(\n {\n 'objective': 'binary:logistic',\n 'eval_metric': ['error', 'rmse'],\n 'tree_method': 'hist'\n },\n D_train,\n evals=[(D_train, 'Train'), (D_valid, 'Valid')],\n feval=tm.eval_error_metric,\n num_boost_round=rounds,\n callbacks=[early_stop],\n verbose_eval=False\n )\n # No iteration can be made with min_delta == 100\n assert booster.best_iteration == 0\n assert booster.num_boosted_rounds() == 1\n\n def test_early_stopping_skl(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n early_stopping_rounds = 5\n cls = xgb.XGBClassifier(\n early_stopping_rounds=early_stopping_rounds, eval_metric='error'\n )\n cls.fit(X, y, eval_set=[(X, y)])\n booster = cls.get_booster()\n dump = booster.get_dump(dump_format='json')\n assert len(dump) - booster.best_iteration == early_stopping_rounds + 1\n\n def test_early_stopping_custom_eval_skl(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n early_stopping_rounds = 5\n early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds)\n cls = xgb.XGBClassifier(\n eval_metric=tm.eval_error_metric_skl, callbacks=[early_stop]\n )\n cls.fit(X, y, eval_set=[(X, y)])\n booster = cls.get_booster()\n dump = booster.get_dump(dump_format='json')\n assert len(dump) - booster.best_iteration == early_stopping_rounds + 1\n\n def test_early_stopping_save_best_model(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n n_estimators = 100\n early_stopping_rounds = 5\n early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds,\n save_best=True)\n cls = xgb.XGBClassifier(\n n_estimators=n_estimators,\n eval_metric=tm.eval_error_metric_skl,\n callbacks=[early_stop]\n )\n cls.fit(X, y, eval_set=[(X, y)])\n booster = cls.get_booster()\n dump = 
booster.get_dump(dump_format='json')\n assert len(dump) == booster.best_iteration + 1\n\n early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds,\n save_best=True)\n cls = xgb.XGBClassifier(\n booster='gblinear', n_estimators=10, eval_metric=tm.eval_error_metric_skl\n )\n with pytest.raises(ValueError):\n cls.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop])\n\n # No error\n early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds,\n save_best=False)\n xgb.XGBClassifier(\n booster='gblinear', n_estimators=10, eval_metric=tm.eval_error_metric_skl\n ).fit(X, y, eval_set=[(X, y)], callbacks=[early_stop])\n\n def test_early_stopping_continuation(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n cls = xgb.XGBClassifier(eval_metric=tm.eval_error_metric_skl)\n early_stopping_rounds = 5\n early_stop = xgb.callback.EarlyStopping(\n rounds=early_stopping_rounds, save_best=True\n )\n with pytest.warns(UserWarning):\n cls.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop])\n\n booster = cls.get_booster()\n assert booster.num_boosted_rounds() == booster.best_iteration + 1\n\n with tempfile.TemporaryDirectory() as tmpdir:\n path = os.path.join(tmpdir, 'model.json')\n cls.save_model(path)\n cls = xgb.XGBClassifier()\n cls.load_model(path)\n assert cls._Booster is not None\n early_stopping_rounds = 3\n cls.set_params(eval_metric=tm.eval_error_metric_skl)\n cls.fit(X, y, eval_set=[(X, y)], early_stopping_rounds=early_stopping_rounds)\n booster = cls.get_booster()\n assert booster.num_boosted_rounds() == \\\n booster.best_iteration + early_stopping_rounds + 1\n\n def test_deprecated(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n early_stopping_rounds = 5\n early_stop = xgb.callback.EarlyStopping(\n rounds=early_stopping_rounds, save_best=True\n )\n clf = xgb.XGBClassifier(\n eval_metric=tm.eval_error_metric_skl, callbacks=[early_stop]\n )\n with pytest.raises(ValueError, match=r\".*set_params.*\"):\n clf.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop])\n\n def run_eta_decay(self, tree_method):\n \"\"\"Test learning rate scheduler, used by both CPU and GPU tests.\"\"\"\n scheduler = xgb.callback.LearningRateScheduler\n\n dpath = os.path.join(tm.PROJECT_ROOT, 'demo/data/')\n dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')\n dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')\n watchlist = [(dtest, 'eval'), (dtrain, 'train')]\n num_round = 4\n\n warning_check = tm.noop_context()\n\n # learning_rates as a list\n # init eta with 0 to check whether learning_rates work\n param = {'max_depth': 2, 'eta': 0, 'verbosity': 0,\n 'objective': 'binary:logistic', 'eval_metric': 'error',\n 'tree_method': tree_method}\n evals_result = {}\n with warning_check:\n bst = xgb.train(param, dtrain, num_round, watchlist,\n callbacks=[scheduler([\n 0.8, 0.7, 0.6, 0.5\n ])],\n evals_result=evals_result)\n eval_errors_0 = list(map(float, evals_result['eval']['error']))\n assert isinstance(bst, xgb.core.Booster)\n # validation error should decrease, if eta > 0\n assert eval_errors_0[0] > eval_errors_0[-1]\n\n # init learning_rate with 0 to check whether learning_rates work\n param = {'max_depth': 2, 'learning_rate': 0, 'verbosity': 0,\n 'objective': 'binary:logistic', 'eval_metric': 'error',\n 'tree_method': tree_method}\n evals_result = {}\n with warning_check:\n bst = xgb.train(param, dtrain, num_round, watchlist,\n callbacks=[scheduler(\n [0.8, 0.7, 0.6, 0.5])],\n 
evals_result=evals_result)\n eval_errors_1 = list(map(float, evals_result['eval']['error']))\n assert isinstance(bst, xgb.core.Booster)\n # validation error should decrease, if learning_rate > 0\n assert eval_errors_1[0] > eval_errors_1[-1]\n\n # check if learning_rates override default value of eta/learning_rate\n param = {\n 'max_depth': 2, 'verbosity': 0, 'objective': 'binary:logistic',\n 'eval_metric': 'error', 'tree_method': tree_method\n }\n evals_result = {}\n with warning_check:\n bst = xgb.train(param, dtrain, num_round, watchlist,\n callbacks=[scheduler(\n [0, 0, 0, 0]\n )],\n evals_result=evals_result)\n eval_errors_2 = list(map(float, evals_result['eval']['error']))\n assert isinstance(bst, xgb.core.Booster)\n # validation error should not decrease, if eta/learning_rate = 0\n assert eval_errors_2[0] == eval_errors_2[-1]\n\n # learning_rates as a customized decay function\n def eta_decay(ithround, num_boost_round=num_round):\n return num_boost_round / (ithround + 1)\n\n evals_result = {}\n with warning_check:\n bst = xgb.train(param, dtrain, num_round, watchlist,\n callbacks=[\n scheduler(eta_decay)\n ],\n evals_result=evals_result)\n eval_errors_3 = list(map(float, evals_result['eval']['error']))\n\n assert isinstance(bst, xgb.core.Booster)\n\n assert eval_errors_3[0] == eval_errors_2[0]\n\n for i in range(1, len(eval_errors_0)):\n assert eval_errors_3[i] != eval_errors_2[i]\n\n with warning_check:\n xgb.cv(param, dtrain, num_round, callbacks=[scheduler(eta_decay)])\n\n @pytest.mark.parametrize(\"tree_method\", [\"hist\", \"approx\", \"exact\"])\n def test_eta_decay(self, tree_method):\n self.run_eta_decay(tree_method)\n\n def test_check_point(self):\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n m = xgb.DMatrix(X, y)\n with tempfile.TemporaryDirectory() as tmpdir:\n check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,\n iterations=1,\n name='model')\n xgb.train({'objective': 'binary:logistic'}, m,\n num_boost_round=10,\n verbose_eval=False,\n callbacks=[check_point])\n for i in range(1, 10):\n assert os.path.exists(\n os.path.join(tmpdir, 'model_' + str(i) + '.json'))\n\n check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,\n iterations=1,\n as_pickle=True,\n name='model')\n xgb.train({'objective': 'binary:logistic'}, m,\n num_boost_round=10,\n verbose_eval=False,\n callbacks=[check_point])\n for i in range(1, 10):\n assert os.path.exists(\n os.path.join(tmpdir, 'model_' + str(i) + '.pkl'))\n\n def test_callback_list(self):\n X, y = tm.get_boston()\n m = xgb.DMatrix(X, y)\n callbacks = [xgb.callback.EarlyStopping(rounds=10)]\n for i in range(4):\n xgb.train({'objective': 'reg:squarederror',\n 'eval_metric': 'rmse'}, m,\n evals=[(m, 'Train')],\n num_boost_round=1,\n verbose_eval=True,\n callbacks=callbacks)\n assert len(callbacks) == 1\n" ]
[ [ "sklearn.datasets.load_breast_cancer" ] ]
owenmwilliams/land_search
[ "630e71672a5fff21c833d9905406b9ef70571b28" ]
[ "models/est/fltr/comps.py" ]
[ "import psycopg2\nfrom datetime import datetime\nfrom psycopg2 import sql\nfrom est.fltr import county_return\nfrom est.db.cur import con_cur\nimport numpy as np\nimport pandas as pd\n\ndef comp_find(est, a, b):\n temp1 = est\n temp2 = np.array(temp1[0])\n county = temp2[0].strip()\n state = temp2[1].strip()\n\n cur, con = con_cur()\n cur.execute(\"\"\"\n SELECT comp_st, comp_cty, comp_lv, comp_perc FROM est_LandValue(%s, %s, %s, %s) \n \"\"\", (a, b, state, county))\n comp_states = cur.fetchall()\n con.close()\n return(comp_states)\n\ndef find_comps(state, county, radius, population):\n cur, con = con_cur()\n cur.execute(\"\"\"\n SELECT comp_st, comp_cty, comp_lv, comp_perc FROM est_LandValue(%s, %s, %s, %s) \n \"\"\", (radius, population, state, county))\n comp_states = pd.DataFrame(cur.fetchall(), columns = ['State', 'County', 'Land Value', 'Perc Land Value'])\n con.close()\n return(comp_states)\n" ]
[ [ "numpy.array" ] ]
qianqian121/puma
[ "4a5980fcd302fc794f50e782e478a3bdd77f57b2" ]
[ "apps/pipelines/slam/puma_pipeline.py" ]
[ "#!/usr/bin/env python3\nimport copy\nimport glob\nimport os\nfrom collections import deque\nfrom pathlib import Path\n\nimport click\nimport numpy as np\nimport open3d as o3d\n\nfrom puma.mesh import create_mesh_from_map\nfrom puma.preprocessing import preprocess\nfrom puma.registration import register_scan_to_mesh, run_icp\nfrom puma.utils import (\n get_progress_bar,\n load_config_from_yaml,\n print_progress,\n save_config_yaml,\n save_poses,\n vel2cam,\n)\n\n\[email protected]()\[email protected](\n \"--config\",\n \"-c\",\n type=click.Path(exists=True),\n default=\"config/puma.yml\",\n help=\"Path to the config file\",\n)\[email protected](\n \"--dataset\",\n \"-d\",\n type=click.Path(exists=True),\n default=os.environ[\"HOME\"] + \"/data/kitti-odometry/ply/\",\n help=\"Location of the KITTI-like dataset\",\n)\[email protected](\n \"--n_scans\",\n \"-n\",\n type=int,\n default=-1,\n required=False,\n help=\"Number of scans to integrate\",\n)\[email protected](\n \"--sequence\",\n \"-s\",\n type=str,\n default=None,\n required=False,\n help=\"Sequence number\",\n)\[email protected](\n \"--odometry_only\",\n is_flag=True,\n default=False,\n help=\"Run odometry only pipeline\",\n)\ndef main(config, dataset, n_scans, sequence, odometry_only):\n \"\"\"This script to run the full puma pipeline as described in the paper. It\n assumes you have the data in the kitti-like format and all the scans where\n already pre-converted to '.ply', for example:\n\n \\b\n kitti/ply\n ├── poses\n │   └── 00.txt\n └── sequences\n └── 00\n ├── calib.txt\n ├── poses.txt\n ├── times.txt\n └── velodyne\n ├── 000000.ply\n ├── 000001.ply\n └── ...\n\n How to run it and check a quick example:\n\n \\b\n $ ./slam/puma_pipeline.py -d ./data/ -s 00 -n 40\n \"\"\"\n config = load_config_from_yaml(config)\n if config.debug:\n o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)\n dataset = os.path.join(dataset, \"\")\n os.makedirs(config.out_dir, exist_ok=True)\n\n map_name = Path(dataset).parent.name\n map_name += \"_\" + sequence\n map_name += \"_depth_\" + str(config.depth)\n map_name += \"_cropped\" if config.min_density else \"\"\n map_name += \"_\" + config.method\n map_name += \"_\" + config.strategy\n\n # Save config\n config_file = map_name + \".yml\"\n config_file = os.path.join(config.out_dir, config_file)\n save_config_yaml(config_file, dict(config))\n\n poses_file = map_name + \".txt\"\n poses_file = os.path.join(config.out_dir, poses_file)\n print(\"Results will be saved to\", poses_file)\n\n scans = os.path.join(dataset, \"sequences\", sequence, \"velodyne\", \"\")\n scan_names = sorted(glob.glob(scans + \"*.ply\"))\n\n # Use the whole sequence if -1 is specified\n n_scans = len(scan_names) if n_scans == -1 else n_scans\n\n # Create data containers to store the map\n mesh = o3d.geometry.TriangleMesh()\n\n # Create a circular buffer, the same way we do in the C++ implementation\n local_map = deque(maxlen=config.acc_frame_count)\n\n # Mapping facilities\n global_mesh = o3d.geometry.TriangleMesh()\n mapping_enabled = not odometry_only\n\n poses = [np.eye(4, 4, dtype=np.float64)]\n deltas = [np.eye(4, 4, dtype=np.float64)]\n last_scan = preprocess(o3d.io.read_point_cloud(scan_names[0]), config)\n\n # Start the Odometry and Mapping pipeline\n scan_count = 0\n map_count = 0\n pbar = get_progress_bar(1, n_scans)\n for idx in pbar:\n str_size = print_progress(pbar, idx, n_scans)\n scan = preprocess(o3d.io.read_point_cloud(scan_names[idx]), config)\n initial_guess = deltas[-1].copy() if 
config.warm_start else np.eye(4)\n if mesh.has_vertices():\n msg = \"[scan #{}] Registering scan to mesh model\".format(idx)\n pbar.set_description(msg.rjust(str_size))\n mesh.transform(np.linalg.inv(poses[-1]))\n pose = register_scan_to_mesh(\n scan, mesh, initial_guess, deltas, last_scan, config\n )\n else:\n pose = run_icp(scan, last_scan, initial_guess, config)\n deltas.append(pose)\n poses.append(poses[-1] @ pose)\n last_scan = copy.deepcopy(scan)\n scan.transform(poses[-1])\n local_map.append(scan)\n\n scan_count += 1\n if scan_count >= config.acc_frame_count or idx == n_scans - 1:\n save_poses(poses_file, vel2cam(poses))\n msg = \"[scan #{}] Running PSR over local_map\".format(idx)\n pbar.set_description(msg.rjust(str_size))\n mesh, _ = create_mesh_from_map(\n local_map, config.depth, config.n_threads, config.min_density\n )\n\n if mapping_enabled:\n map_count += 1\n if map_count >= config.acc_map_count or idx == n_scans - 1:\n map_count = 0\n global_mesh += mesh\n global_mesh = global_mesh.remove_duplicated_triangles()\n global_mesh = global_mesh.remove_duplicated_vertices()\n\n if mapping_enabled:\n # Save map to file\n mesh_map_file = os.path.join(config.out_dir, map_name + \".ply\")\n print(\"Saving Map to\", mesh_map_file)\n o3d.io.write_triangle_mesh(mesh_map_file, global_mesh)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linalg.inv", "numpy.eye" ] ]
ZhengL97/frites
[ "48d752a8c316d6e4cf2e7c795af539c42eea8cd6" ]
[ "frites/core/gcmi_nd.py" ]
[ "\"\"\"Multi-dimentional Gaussian copula mutual information estimation.\"\"\"\nimport numpy as np\nfrom scipy.special import psi\nfrom itertools import product\n\nfrom frites.core.copnorm import copnorm_nd\n\n###############################################################################\n###############################################################################\n# N-D TOOLS\n###############################################################################\n###############################################################################\n\n\ndef nd_reshape(x, mvaxis=None, traxis=-1):\n \"\"\"Multi-dimentional reshaping.\n\n This function is used to be sure that an nd array has a correct shape\n of (..., mvaxis, traxis).\n\n Parameters\n ----------\n x : array_like\n Multi-dimentional array\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n\n Returns\n -------\n x_rsh : array_like\n The reshaped multi-dimentional array of shape (..., mvaxis, traxis)\n \"\"\"\n assert isinstance(traxis, int)\n traxis = np.arange(x.ndim)[traxis]\n\n # Create an empty mvaxis axis\n if not isinstance(mvaxis, int):\n x = x[..., np.newaxis]\n mvaxis = -1\n assert isinstance(mvaxis, int)\n mvaxis = np.arange(x.ndim)[mvaxis]\n\n # move the multi-variate and trial axis\n x = np.moveaxis(x, (mvaxis, traxis), (-2, -1))\n\n return x\n\n\ndef nd_shape_checking(x, y, mvaxis, traxis):\n \"\"\"Check that the shape between two ndarray is consitent.\n\n x.shape = (nx_1, ..., n_xn, x_mvaxis, traxis)\n y.shape = (nx_1, ..., n_xn, y_mvaxis, traxis)\n \"\"\"\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])\n\n\n###############################################################################\n###############################################################################\n# MUTUAL INFORMATION\n###############################################################################\n###############################################################################\n\n\ndef mi_nd_gg(x, y, mvaxis=None, traxis=-1, biascorrect=True, demeaned=False,\n shape_checking=True):\n \"\"\"Multi-dimentional MI between two Gaussian variables in bits.\n\n Parameters\n ----------\n x, y : array_like\n Arrays to consider for computing the Mutual Information. The two input\n variables x and y should have the same shape except on the mvaxis\n (if needed).\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x and y, without the\n mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n\n # x.shape (..., x_mvaxis, traxis)\n # y.shape (..., y_mvaxis, traxis)\n ntrl = x.shape[-1]\n nvarx, nvary = x.shape[-2], y.shape[-2]\n nvarxy = nvarx + nvary\n\n # joint variable along the mvaxis\n xy = np.concatenate((x, y), axis=-2)\n if not demeaned:\n xy -= xy.mean(axis=-1, keepdims=True)\n cxy = np.einsum('...ij, ...kj->...ik', xy, xy)\n cxy /= float(ntrl - 1.)\n\n # submatrices of joint covariance\n cx = cxy[..., :nvarx, :nvarx]\n cy = cxy[..., nvarx:, nvarx:]\n\n # Cholesky decomposition\n chcxy = np.linalg.cholesky(cxy)\n chcx = np.linalg.cholesky(cx)\n chcy = np.linalg.cholesky(cy)\n\n # entropies in nats\n # normalizations cancel for mutual information\n hx = np.log(np.einsum('...ii->...i', chcx)).sum(-1)\n hy = np.log(np.einsum('...ii->...i', chcy)).sum(-1)\n hxy = np.log(np.einsum('...ii->...i', chcxy)).sum(-1)\n\n ln2 = np.log(2)\n if biascorrect:\n vec = np.arange(1, nvarxy + 1)\n psiterms = psi((ntrl - vec).astype(np.float) / 2.0) / 2.0\n dterm = (ln2 - np.log(ntrl - 1.0)) / 2.0\n hx = hx - nvarx * dterm - psiterms[:nvarx].sum()\n hy = hy - nvary * dterm - psiterms[:nvary].sum()\n hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()\n\n # MI in bits\n i = (hx + hy - hxy) / ln2\n return i\n\n\ndef mi_model_nd_gd(x, y, mvaxis=None, traxis=-1, biascorrect=True,\n demeaned=False, shape_checking=True):\n \"\"\"Multi-dimentional MI between a Gaussian and a discret variables in bits.\n\n This function is based on ANOVA style model comparison.\n\n Parameters\n ----------\n x, y : array_like\n Arrays to consider for computing the Mutual Information. The two input\n variables x and y should have the same shape except on the mvaxis\n (if needed).\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x and y, without the\n mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n assert isinstance(y, np.ndarray) and (y.ndim == 1)\n assert x.shape[-1] == len(y)\n\n # x.shape (..., x_mvaxis, traxis)\n nvarx, ntrl = x.shape[-2], x.shape[-1]\n u_y = np.unique(y)\n sh = x.shape[:-2]\n zm_shape = list(sh) + [len(u_y)]\n\n # joint variable along the mvaxis\n if not demeaned:\n x = x - x.mean(axis=-1, keepdims=True)\n\n # class-conditional entropies\n ntrl_y = np.zeros((len(u_y),), dtype=int)\n hcond = np.zeros(zm_shape, dtype=float)\n # c = .5 * (np.log(2. * np.pi) + 1)\n for num, yi in enumerate(u_y):\n idx = y == yi\n xm = x[..., idx]\n ntrl_y[num] = idx.sum()\n xm = xm - xm.mean(axis=-1, keepdims=True)\n cm = np.einsum('...ij, ...kj->...ik', xm, xm) / float(ntrl_y[num] - 1.)\n chcm = np.linalg.cholesky(cm)\n hcond[..., num] = np.log(np.einsum('...ii->...i', chcm)).sum(-1)\n\n # class weights\n w = ntrl_y / float(ntrl)\n\n # unconditional entropy from unconditional Gaussian fit\n cx = np.einsum('...ij, ...kj->...ik', x, x) / float(ntrl - 1.)\n chc = np.linalg.cholesky(cx)\n hunc = np.log(np.einsum('...ii->...i', chc)).sum(-1)\n\n ln2 = np.log(2)\n if biascorrect:\n vars = np.arange(1, nvarx + 1)\n\n psiterms = psi((ntrl - vars).astype(float) / 2.) / 2.\n dterm = (ln2 - np.log(float(ntrl - 1))) / 2.\n hunc = hunc - nvarx * dterm - psiterms.sum()\n\n dterm = (ln2 - np.log((ntrl_y - 1).astype(float))) / 2.\n psiterms = np.zeros_like(ntrl_y, dtype=float)\n for vi in vars:\n idx = ntrl_y - vi\n psiterms = psiterms + psi(idx.astype(np.float) / 2.)\n hcond = hcond - nvarx * dterm - (psiterms / 2.)\n\n # MI in bits\n i = (hunc - np.einsum('i, ...i', w, hcond)) / ln2\n return i\n\n\ndef cmi_nd_ggg(x, y, z, mvaxis=None, traxis=-1, biascorrect=True,\n demeaned=False, shape_checking=True):\n \"\"\"Multi-dimentional MI between three Gaussian variables in bits.\n\n This function is based on ANOVA style model comparison.\n\n Parameters\n ----------\n x, y, z : array_like\n Arrays to consider for computing the Mutual Information. The three\n input variables x, y and z should have the same shape except on the\n mvaxis (if needed).\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x, y and z without the\n mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n z = nd_reshape(z, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n nd_shape_checking(x, z, mvaxis, traxis)\n\n # x.shape == y.shape == z.shape (..., x_mvaxis, traxis)\n ntrl = x.shape[-1]\n nvarx, nvary, nvarz = x.shape[-2], y.shape[-2], z.shape[-2]\n nvarxy = nvarx + nvary\n nvaryz = nvary + nvarz\n nvarxy = nvarx + nvary\n nvarxz = nvarx + nvarz\n nvarxyz = nvarx + nvaryz\n\n # joint variable along the mvaxis\n xyz = np.concatenate((x, y, z), axis=-2)\n if not demeaned:\n xyz -= xyz.mean(axis=-1, keepdims=True)\n cxyz = np.einsum('...ij, ...kj->...ik', xyz, xyz)\n cxyz /= float(ntrl - 1.)\n\n # submatrices of joint covariance\n cz = cxyz[..., nvarxy:, nvarxy:]\n cyz = cxyz[..., nvarx:, nvarx:]\n sh = list(cxyz.shape)\n sh[-1], sh[-2] = nvarxz, nvarxz\n cxz = np.zeros(tuple(sh), dtype=float)\n cxz[..., :nvarx, :nvarx] = cxyz[..., :nvarx, :nvarx]\n cxz[..., :nvarx, nvarx:] = cxyz[..., :nvarx, nvarxy:]\n cxz[..., nvarx:, :nvarx] = cxyz[..., nvarxy:, :nvarx]\n cxz[..., nvarx:, nvarx:] = cxyz[..., nvarxy:, nvarxy:]\n\n # Cholesky decomposition\n chcz = np.linalg.cholesky(cz)\n chcxz = np.linalg.cholesky(cxz)\n chcyz = np.linalg.cholesky(cyz)\n chcxyz = np.linalg.cholesky(cxyz)\n\n # entropies in nats\n # normalizations cancel for mutual information\n hz = np.log(np.einsum('...ii->...i', chcz)).sum(-1)\n hxz = np.log(np.einsum('...ii->...i', chcxz)).sum(-1)\n hyz = np.log(np.einsum('...ii->...i', chcyz)).sum(-1)\n hxyz = np.log(np.einsum('...ii->...i', chcxyz)).sum(-1)\n\n ln2 = np.log(2)\n if biascorrect:\n vec = np.arange(1, nvarxyz + 1)\n psiterms = psi((ntrl - vec).astype(np.float) / 2.0) / 2.0\n dterm = (ln2 - np.log(ntrl - 1.0)) / 2.0\n hz = hz - nvarz * dterm - psiterms[:nvarz].sum()\n hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()\n hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()\n hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()\n\n # MI in bits\n i = (hxz + hyz - hxyz - hz) / ln2\n return i\n\n\n###############################################################################\n###############################################################################\n# GAUSSIAN COPULA MUTUAL INFORMATION\n###############################################################################\n###############################################################################\n\n\ndef gcmi_nd_cc(x, y, mvaxis=None, traxis=-1, shape_checking=True, gcrn=True):\n \"\"\"GCMI between two continuous variables.\n\n The only difference with `mi_gg` is that a normalization is performed for\n each continuous variable.\n\n Parameters\n ----------\n x, y : array_like\n Continuous variables\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization. This operation is\n relatively slow for big arrays.\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x and y, without the\n mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n\n # x.shape (..., x_mvaxis, traxis)\n # y.shape (..., y_mvaxis, traxis)\n if gcrn:\n cx, cy = copnorm_nd(x, axis=-1), copnorm_nd(y, axis=-1)\n else:\n cx, cy = x, y\n return mi_nd_gg(cx, cy, mvaxis=-2, traxis=-1, biascorrect=True,\n demeaned=True, shape_checking=False)\n\n\ndef gcmi_model_nd_cd(x, y, mvaxis=None, traxis=-1, shape_checking=True,\n gcrn=True):\n \"\"\"GCMI between a continuous and discret variables.\n\n The only difference with `mi_gg` is that a normalization is performed for\n each continuous variable.\n\n Parameters\n ----------\n x : array_like\n Continuous variable\n y : array_like\n Discret variable of shape (n_trials,)\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n shape_checking : bool | True\n Perform a reshape and check that x is consistents. For high\n performances and to avoid extensive memory usage, it's better to\n already have x with a shape of (..., mvaxis, traxis) and to set this\n parameter to False\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization. This operation is\n relatively slow for big arrays.\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x, without the mvaxis and\n traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n\n # x.shape (..., x_mvaxis, traxis)\n # y.shape (traxis)\n cx = copnorm_nd(x, axis=-1) if gcrn else x\n return mi_model_nd_gd(cx, y, mvaxis=-2, traxis=-1, biascorrect=True,\n demeaned=True, shape_checking=False)\n\n###############################################################################\n###############################################################################\n# GAUSSIAN COPULA CONTIONAL MUTUAL INFORMATION\n###############################################################################\n###############################################################################\n\n\ndef gccmi_nd_ccnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,\n shape_checking=True, biascorrect=True, demeaned=True):\n \"\"\"Conditional GCMI between two continuous variables.\n\n This function performs a GC-CMI between 2 continuous variables conditioned\n with multiple discrete variables.\n\n Parameters\n ----------\n x, y : array_like\n Arrays to consider for computing the Mutual Information. The two input\n variables x and y should have the same shape except on the mvaxis\n (if needed).\n z : list | array_like\n Array that describes the conditions across the trial axis. Should be a\n list of arrays of shape (n_trials,) of integers\n (e.g. [0, 0, ..., 1, 1, 2, 2])\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. 
By default the last axis is\n considered\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization. This operation is\n relatively slow for big arrays.\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n\n Returns\n -------\n cmi : array_like\n Conditional mutual-information with the same shape as x and y without\n the mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n ntrl = x.shape[-1]\n\n # Find unique values of each discret array\n prod_idx = discret_to_index(*z)\n # sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]\n sh = x.shape[:-2]\n zm_shape = list(sh) + [len(prod_idx)]\n\n # calculate gcmi for each z value\n pz = np.zeros((len(prod_idx),), dtype=float)\n icond = np.zeros(zm_shape, dtype=float)\n for num, idx in enumerate(prod_idx):\n pz[num] = idx.sum()\n if gcrn:\n thsx = copnorm_nd(x[..., idx], axis=-1)\n thsy = copnorm_nd(y[..., idx], axis=-1)\n else:\n thsx = x[..., idx]\n thsy = y[..., idx]\n icond[..., num] = mi_nd_gg(thsx, thsy, mvaxis=-2, traxis=-1,\n biascorrect=biascorrect, demeaned=demeaned,\n shape_checking=False)\n pz /= ntrl\n\n # conditional mutual information\n cmi = np.sum(pz * icond, axis=-1)\n return cmi\n\n\ndef cmi_nd_ggd(x, y, z, mvaxis=None, traxis=-1, shape_checking=True,\n biascorrect=True, demeaned=False):\n \"\"\"Conditional MI between a continuous and a discret variable.\n\n This function performs a CMI between a continuous and a discret variable\n conditioned with multiple discrete variables.\n\n Parameters\n ----------\n x : array_like\n Continuous variable\n y : array_like\n Discret variable\n z : list | array_like\n Array that describes the conditions across the trial axis of shape\n (n_trials,)\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n\n Returns\n -------\n cmi : array_like\n Conditional mutual-information with the same shape as x and y without\n the mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n ntrl = x.shape[-1]\n assert (z.ndim == 1) and (len(z) == ntrl)\n ntrl = x.shape[-1]\n\n # sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]\n u_z = np.unique(z)\n sh = x.shape[:-2]\n zm_shape = list(sh) + [len(u_z)]\n\n # calculate gcmi for each z value\n pz = np.zeros((len(u_z),), dtype=float)\n icond = np.zeros(zm_shape, dtype=float)\n for n_z, zi in enumerate(u_z):\n idx = z == zi\n pz[n_z] = idx.sum()\n thsx, thsy = x[..., idx], y[..., idx]\n icond[..., n_z] = mi_nd_gg(thsx, thsy, mvaxis=-2, traxis=-1,\n biascorrect=biascorrect, demeaned=demeaned,\n shape_checking=False)\n pz /= ntrl\n\n # conditional mutual information\n cmi = np.sum(np.multiply(pz, icond), axis=-1)\n return cmi\n\n\ndef gccmi_model_nd_cdnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,\n shape_checking=True):\n \"\"\"Conditional GCMI between a continuous and a discret variable.\n\n This function performs a GC-CMI between a continuous and a discret\n variable conditioned with multiple discrete variables.\n\n Parameters\n ----------\n x : array_like\n Continuous variable\n y : array_like\n Discret variable\n z : list | array_like\n Array that describes the conditions across the trial axis. Should be a\n list of arrays of shape (n_trials,) of integers\n (e.g. [0, 0, ..., 1, 1, 2, 2])\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization. This operation is\n relatively slow for big arrays.\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. 
For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n\n Returns\n -------\n cmi : array_like\n Conditional mutual-information with the same shape as x and y without\n the mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n assert isinstance(y, np.ndarray) and (y.ndim == 1)\n assert x.shape[-1] == len(y)\n ntrl = x.shape[-1]\n\n # Find unique values of each discret array\n prod_idx = discret_to_index(*z)\n # sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]\n sh = x.shape[:-2]\n zm_shape = list(sh) + [len(prod_idx)]\n\n # calculate gcmi for each z value\n pz = np.zeros((len(prod_idx),), dtype=float)\n icond = np.zeros(zm_shape, dtype=float)\n for num, idx in enumerate(prod_idx):\n pz[num] = idx.sum()\n if gcrn:\n thsx = copnorm_nd(x[..., idx], axis=-1)\n else:\n thsx = x[..., idx]\n thsy = y[idx]\n icond[..., num] = mi_model_nd_gd(thsx, thsy, mvaxis=-2, traxis=-1,\n biascorrect=True, demeaned=True,\n shape_checking=False)\n pz /= ntrl\n\n # conditional mutual information\n cmi = np.sum(pz * icond, axis=-1)\n return cmi\n\n\ndef discret_to_index(*z):\n \"\"\"Convert a list of discret variables into boolean indices.\n\n Parameters\n ----------\n z : tuple | list\n List of discret variables\n\n Returns\n -------\n idx : list\n List of boolean arrays. Each array specify the condition to use\n \"\"\"\n if isinstance(z, np.ndarray) and (z.ndim == 1):\n return [z == k for k in np.unique(z)]\n elif isinstance(z, (tuple, list)):\n # array checking\n is_array = all([isinstance(k, np.ndarray) for k in z])\n is_vec = all([k.ndim == 1 for k in z])\n is_shape = all([z[0].shape == k.shape for k in z])\n if not (is_array and is_vec and is_shape):\n raise TypeError(\"z should be a list of 1-D array, all with the \"\n \"same shape\")\n\n # build unique indices\n u_z = tuple([tuple(np.unique(k)) for k in z])\n idx = []\n for k in product(*u_z):\n _idx = []\n for _c, _k in zip(z, k):\n _idx += [_c == _k]\n _idx_bool = np.all(np.c_[_idx], axis=0)\n if _idx_bool.any():\n idx += [_idx_bool]\n return idx\n\n\ndef gccmi_nd_ccc(x, y, z, mvaxis=None, traxis=-1, shape_checking=True,\n gcrn=True):\n \"\"\"GCCMI between two continuous variables conditioned on a third.\n\n Parameters\n ----------\n x, y, z : array_like\n Continuous variables. z is the continuous variable that is considered\n as the condition\n mvaxis : int | None\n Spatial location of the axis to consider if multi-variate analysis\n is needed\n traxis : int | -1\n Spatial location of the trial axis. By default the last axis is\n considered\n shape_checking : bool | True\n Perform a reshape and check that x and y shapes are consistents. For\n high performances and to avoid extensive memory usage, it's better to\n already have x and y with a shape of (..., mvaxis, traxis) and to set\n this parameter to False\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization. 
This operation is\n relatively slow for big arrays.\n\n Returns\n -------\n mi : array_like\n The mutual information with the same shape as x and y, without the\n mvaxis and traxis\n \"\"\"\n # Multi-dimentional shape checking\n if shape_checking:\n x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)\n y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)\n z = nd_reshape(z, mvaxis=mvaxis, traxis=traxis)\n nd_shape_checking(x, y, mvaxis, traxis)\n nd_shape_checking(x, z, mvaxis, traxis)\n\n # x.shape == y.shape == z.shape (..., x_mvaxis, traxis)\n if gcrn:\n cx, cy = copnorm_nd(x, axis=-1), copnorm_nd(y, axis=-1)\n cz = copnorm_nd(z, axis=-1)\n else:\n cx, cy, cz = x, y, z\n return cmi_nd_ggg(cx, cy, cz, mvaxis=-2, traxis=-1, biascorrect=True,\n demeaned=True, shape_checking=False)\n" ]
[ [ "numpy.log", "numpy.einsum", "numpy.unique", "numpy.multiply", "numpy.arange", "numpy.concatenate", "numpy.all", "numpy.zeros_like", "numpy.linalg.cholesky", "numpy.moveaxis", "numpy.sum", "numpy.zeros" ] ]
gamcoh/U-2-Net
[ "a116a06ebdc46772b2b400ad180319b89c8ce72c" ]
[ "u2net_test.py" ]
[ "import os\nfrom skimage import io, transform\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms#, utils\nimport argparse\n# import torch.optim as optim\n\nimport numpy as np\nfrom PIL import Image\nimport glob\n\nfrom data_loader import RescaleT\nfrom data_loader import ToTensor\nfrom data_loader import ToTensorLab\nfrom data_loader import SalObjDataset\n\nfrom model import U2NET # full size version 173.6 MB\nfrom model import U2NETP # small version u2net 4.7 MB\n\n# normalize the predicted SOD probability map\ndef normPRED(d):\n ma = torch.max(d)\n mi = torch.min(d)\n\n dn = (d-mi)/(ma-mi)\n\n return dn\n\ndef save_output(image_name,pred,d_dir, colored=False):\n predict = pred\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n\n im = Image.fromarray(predict_np*255).convert('RGB')\n img_name = image_name.split(\"/\")[-1]\n image = io.imread(image_name)\n imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)\n\n aaa = img_name.split(\".\")\n bbb = aaa[0:-1]\n imidx = bbb[0]\n for i in range(1,len(bbb)):\n imidx = imidx + \".\" + bbb[i]\n\n imo.save(d_dir+imidx+'.png')\n\n if colored:\n oriimg = Image.open(image_name)\n bin_image = predict_np*255\n bin_image = Image.fromarray(bin_image).convert('RGB')\n bin_image = bin_image.resize((oriimg.width, oriimg.height), resample=Image.BILINEAR)\n bin_image = np.array(bin_image)\n bin_image = np.where(bin_image > 200, 1, 0)\n colored_img = bin_image * np.array(oriimg)\n colored_img = Image.fromarray(colored_img.astype(np.uint8))\n img = colored_img.convert(\"RGBA\")\n datas = img.getdata()\n newData = []\n for item in datas:\n if item[0] == 0 and item[1] == 0 and item[2] == 0:\n newData.append((0, 0, 0, 0))\n else:\n newData.append(item)\n img.putdata(newData)\n path = d_dir+imidx+'_COLORED.png'\n img.save(path, 'PNG')\n return path\n\ndef main(colored=False, imagepath=''):\n\n # --------- 1. get image path and name ---------\n model_name='u2net'#u2netp\n\n prediction_dir = './test_data/' + model_name + '_results/'\n model_dir = './saved_models/'+ model_name + '/' + model_name + '.pth'\n\n # --------- 2. dataloader ---------\n #1. dataloader\n test_salobj_dataset = SalObjDataset(img_name_list = [imagepath],\n lbl_name_list = [],\n transform=transforms.Compose([RescaleT(320),\n ToTensorLab(flag=0)])\n )\n test_salobj_dataloader = DataLoader(test_salobj_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=1)\n\n # --------- 3. model define ---------\n if(model_name=='u2net'):\n print(\"...load U2NET---173.6 MB\")\n net = U2NET(3,1)\n elif(model_name=='u2netp'):\n print(\"...load U2NEP---4.7 MB\")\n net = U2NETP(3,1)\n net.load_state_dict(torch.load(model_dir, map_location=torch.device('cpu')))\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n # --------- 4. 
inference for each image ---------\n for _, data_test in enumerate(test_salobj_dataloader):\n inputs_test = data_test['image']\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = Variable(inputs_test)\n\n d1,d2,d3,d4,d5,d6,d7= net(inputs_test)\n\n # normalization\n pred = d1[:,0,:,:]\n pred = normPRED(pred)\n\n del d1,d2,d3,d4,d5,d6,d7\n # save results to test_results folder\n return save_output(imagepath, pred, prediction_dir, colored=colored)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--colored', default=False, type=bool, dest='colored', help='Save the colored version of the result. Default=False.')\n args = parser.parse_args()\n main(args.colored)\n" ]
[ [ "torch.max", "torch.min", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.device", "numpy.array", "numpy.where", "torch.autograd.Variable" ] ]
ChipWan/pytorch-lightning
[ "2c31beccfbfe0752306122a2ba6f9822ec5cb6b8" ]
[ "pytorch_lightning/trainer/training_io.py" ]
[ "\"\"\"\nLightning can automate saving and loading checkpoints\n=====================================================\n\nCheckpointing is enabled by default to the current working directory.\nTo change the checkpoint path pass in::\n\n Trainer(default_root_dir='/your/path/to/save/checkpoints')\n\n\nTo modify the behavior of checkpointing pass in your own callback.\n\n.. code-block:: python\n\n from pytorch_lightning.callbacks import ModelCheckpoint\n\n # DEFAULTS used by the Trainer\n checkpoint_callback = ModelCheckpoint(\n filepath=os.getcwd(),\n save_top_k=1,\n verbose=True,\n monitor='val_loss',\n mode='min',\n prefix=''\n )\n\n trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n\nRestoring training session\n--------------------------\n\nYou might want to not only load a model but also continue training it. Use this method to\nrestore the trainer state as well. This will continue from the epoch and global step you last left off.\nHowever, the dataloaders will start from the first batch again (if you shuffled it shouldn't matter).\n\nLightning will restore the session if you pass a logger with the same version and there's a saved checkpoint.\n\n.. code-block:: python\n\n from pytorch_lightning import Trainer\n\n trainer = Trainer(\n resume_from_checkpoint=PATH\n )\n\n # this fit call loads model weights and trainer state\n # the trainer continues seamlessly from where you left off\n # without having to do anything else.\n trainer.fit(model)\n\n\nThe trainer restores:\n\n- global_step\n- current_epoch\n- All optimizers\n- All lr_schedulers\n- Model weights\n\nYou can even change the logic of your model as long as the weights and \"architecture\" of\nthe system isn't different. If you add a layer, for instance, it might not work.\n\nAt a rough level, here's what happens inside Trainer :py:mod:`pytorch_lightning.base_module.saving.py`:\n\n.. 
code-block:: python\n\n self.global_step = checkpoint['global_step']\n self.current_epoch = checkpoint['epoch']\n\n # restore the optimizers\n optimizer_states = checkpoint['optimizer_states']\n for optimizer, opt_state in zip(self.optimizers, optimizer_states):\n optimizer.load_state_dict(opt_state)\n\n # restore the lr schedulers\n lr_schedulers = checkpoint['lr_schedulers']\n for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):\n scheduler['scheduler'].load_state_dict(lrs_state)\n\n # uses the model you passed into trainer\n model.load_state_dict(checkpoint['state_dict'])\n\n\"\"\"\n\nimport io\nimport os\nimport re\nimport signal\nfrom abc import ABC\nfrom distutils.version import LooseVersion\nfrom subprocess import call\n\nimport torch\nimport torch.distributed as torch_distrib\n\nimport pytorch_lightning\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.overrides.data_parallel import (\n LightningDistributedDataParallel,\n LightningDataParallel,\n)\nfrom pytorch_lightning.utilities import rank_zero_warn, AMPType\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.cloud_io import cloud_open, gfile, makedirs\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\ntry:\n import horovod.torch as hvd\nexcept (ModuleNotFoundError, ImportError):\n HOROVOD_AVAILABLE = False\nelse:\n HOROVOD_AVAILABLE = True\n\ntry:\n from omegaconf import Container\nexcept ImportError:\n OMEGACONF_AVAILABLE = False\nelse:\n OMEGACONF_AVAILABLE = True\n\n\nclass TrainerIOMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n model: LightningModule\n on_gpu: bool\n root_gpu: ...\n resume_from_checkpoint: ...\n use_ddp: bool\n use_ddp2: bool\n use_horovod: bool\n checkpoint_callback: ...\n global_rank: int\n weights_save_path: str\n logger: LightningLoggerBase\n early_stop_callback: ...\n lr_schedulers: ...\n optimizers: ...\n on_tpu: bool\n num_training_batches: int\n accumulate_grad_batches: int\n scaler: ...\n use_tpu: bool\n amp_type: AMPType\n\n def get_model(self):\n is_dp_module = isinstance(self.model, (LightningDistributedDataParallel, LightningDataParallel))\n model = self.model.module if is_dp_module else self.model\n return model\n\n # --------------------\n # CHECK-POINTING\n # --------------------\n def restore_weights(self, model: LightningModule):\n \"\"\"\n We attempt to restore weights in this order:\n 1. HPC weights.\n 2. if no HPC weights restore checkpoint_path weights\n 3. 
otherwise don't restore weights\n \"\"\"\n # clear cache before restore\n if self.on_gpu:\n torch.cuda.empty_cache()\n\n # if script called from hpc resubmit, load weights\n did_restore_hpc_weights = self.restore_hpc_weights_if_needed(model)\n\n # clear cache after restore\n if self.on_gpu:\n torch.cuda.empty_cache()\n\n if not did_restore_hpc_weights:\n if self.resume_from_checkpoint is not None:\n self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)\n\n # wait for all models to restore weights\n if self.use_ddp or self.use_ddp2:\n # wait for all processes to catch up\n torch_distrib.barrier()\n\n # wait for all models to restore weights\n if self.on_tpu and XLA_AVAILABLE:\n # wait for all processes to catch up\n torch_xla.core.xla_model.rendezvous(\"pl.TrainerIOMixin.restore_weights\")\n\n elif self.use_horovod:\n # wait for all processes to catch up\n hvd.join()\n\n # clear cache after restore\n if self.on_gpu:\n torch.cuda.empty_cache()\n\n # --------------------\n # HPC SIGNAL HANDLING\n # --------------------\n def register_slurm_signal_handlers(self):\n # see if we're using slurm (not interactive)\n on_slurm = False\n try:\n job_name = os.environ['SLURM_JOB_NAME']\n if job_name != 'bash':\n on_slurm = True\n except Exception:\n pass\n\n if on_slurm:\n log.info('Set SLURM handle signals.')\n signal.signal(signal.SIGUSR1, self.sig_handler)\n signal.signal(signal.SIGTERM, self.term_handler)\n\n def sig_handler(self, signum, frame): # pragma: no-cover\n if self.is_global_zero:\n # save weights\n log.info('handling SIGUSR1')\n self.hpc_save(self.weights_save_path, self.logger)\n\n # find job id\n job_id = os.environ['SLURM_JOB_ID']\n cmd = ['scontrol', 'requeue', job_id]\n\n # requeue job\n log.info(f'requeing job {job_id}...')\n result = call(cmd)\n\n # print result text\n if result == 0:\n log.info(f'requeued exp {job_id}')\n else:\n log.warning('requeue failed...')\n\n # close experiment to avoid issues\n self.logger.close()\n\n def term_handler(self, signum, frame):\n # save\n log.info(\"bypassing sigterm\")\n\n # --------------------\n # MODEL SAVE CHECKPOINT\n # --------------------\n def _atomic_save(self, checkpoint, filepath: str):\n \"\"\"Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.\n\n This will create a temporary checkpoint with a suffix of ``.part``, then copy it to the final location once\n saving is finished.\n\n Args:\n checkpoint: The object to save.\n Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``\n accepts.\n filepath: The path to which the checkpoint will be saved.\n This points to the file that the checkpoint will be stored in.\n \"\"\"\n bytesbuffer = io.BytesIO()\n # Can't use the new zipfile serialization for 1.6.0 because there's a bug in\n # torch.hub.load_state_dict_from_url() that prevents it from loading the new files.\n # More details can be found here: https://github.com/pytorch/pytorch/issues/42239\n if LooseVersion(torch.__version__).version[:3] == [1, 6, 0]:\n torch.save(checkpoint, bytesbuffer, _use_new_zipfile_serialization=False)\n else:\n torch.save(checkpoint, bytesbuffer)\n with cloud_open(filepath, 'wb') as f:\n f.write(bytesbuffer.getvalue())\n\n def save_checkpoint(self, filepath, weights_only: bool = False):\n checkpoint = self.dump_checkpoint(weights_only)\n\n if self.is_global_zero:\n # do the actual save\n try:\n self._atomic_save(checkpoint, filepath)\n except AttributeError as err:\n if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in 
checkpoint:\n del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]\n rank_zero_warn(\n 'Warning, `module_arguments` dropped from checkpoint.' f' An attribute is not picklable {err}'\n )\n self._atomic_save(checkpoint, filepath)\n\n def restore(self, checkpoint_path: str, on_gpu: bool):\n \"\"\"\n Restore training state from checkpoint.\n Also restores all training state like:\n - epoch\n - callbacks\n - schedulers\n - optimizer\n \"\"\"\n\n # if on_gpu:\n # checkpoint = torch.load(checkpoint_path)\n # else:\n # load on CPU first\n checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)\n\n # load model state\n model = self.get_model()\n\n # load the state_dict on the model automatically\n model.load_state_dict(checkpoint['state_dict'])\n\n # give model a chance to load something\n model.on_load_checkpoint(checkpoint)\n\n if on_gpu:\n model.cuda(self.root_gpu)\n\n # restore amp scaling\n if self.amp_type == AMPType.NATIVE and 'native_amp_scaling_state' in checkpoint:\n self.scaler.load_state_dict(checkpoint['native_amp_scaling_state'])\n elif self.amp_type == AMPType.APEX and 'amp_scaling_state' in checkpoint:\n amp.load_state_dict(checkpoint['amp_scaling_state'])\n\n # load training state (affects trainer only)\n self.restore_training_state(checkpoint)\n\n def dump_checkpoint(self, weights_only: bool = False) -> dict:\n \"\"\"Creating model checkpoint.\n\n Args:\n weights_only: saving model weights only\n\n Return:\n structured dictionary\n \"\"\"\n checkpoint = {\n 'epoch': self.current_epoch + 1,\n 'global_step': self.global_step + 1,\n 'pytorch-lightning_version': pytorch_lightning.__version__,\n }\n\n if not weights_only:\n\n # TODO support more generic way for callbacks to persist a state_dict in a checkpoint\n checkpoint_callbacks = [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]\n early_stopping_callbacks = [c for c in self.callbacks if isinstance(c, EarlyStopping)]\n\n if checkpoint_callbacks:\n # we add the official checkpoint callback to the end of the list\n # extra user provided callbacks will not be persisted yet\n checkpoint[ModelCheckpoint.CHECKPOINT_STATE_BEST_SCORE] = self.checkpoint_callback.best_model_score\n checkpoint[ModelCheckpoint.CHECKPOINT_STATE_BEST_PATH] = self.checkpoint_callback.best_model_path\n\n if early_stopping_callbacks and checkpoint_callbacks:\n # we add the official early stopping callback to the end of the list\n # extra user provided callbacks will not be persisted yet\n checkpoint['early_stop_callback_state_dict'] = early_stopping_callbacks[-1].state_dict()\n\n # save optimizers\n optimizer_states = []\n for i, optimizer in enumerate(self.optimizers):\n optimizer_states.append(optimizer.state_dict())\n checkpoint['optimizer_states'] = optimizer_states\n\n # save lr schedulers\n lr_schedulers = []\n for scheduler in self.lr_schedulers:\n lr_schedulers.append(scheduler['scheduler'].state_dict())\n checkpoint['lr_schedulers'] = lr_schedulers\n\n # save native amp scaling\n if self.amp_type == AMPType.NATIVE and not self.use_tpu:\n checkpoint['native_amp_scaling_state'] = self.scaler.state_dict()\n elif self.amp_type == AMPType.APEX:\n checkpoint['amp_scaling_state'] = amp.state_dict()\n\n # add the module_arguments and state_dict from the model\n model = self.get_model()\n\n checkpoint['state_dict'] = model.state_dict()\n\n if model.hparams:\n if hasattr(model, '_hparams_name'):\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_NAME] = model._hparams_name\n # add arguments to the checkpoint\n 
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = model.hparams\n if OMEGACONF_AVAILABLE:\n if isinstance(model.hparams, Container):\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_TYPE] = type(model.hparams)\n\n # give the model a chance to add a few things\n model.on_save_checkpoint(checkpoint)\n\n return checkpoint\n\n # --------------------\n # HPC IO\n # --------------------\n def restore_hpc_weights_if_needed(self, model: LightningModule):\n \"\"\"If there is a set of hpc weights, use as signal to restore model.\"\"\"\n did_restore = False\n\n # look for hpc weights\n folderpath = str(self.weights_save_path)\n if gfile.exists(folderpath):\n files = gfile.listdir(folderpath)\n hpc_weight_paths = [x for x in files if 'hpc_ckpt' in x]\n\n # if hpc weights exist restore model\n if len(hpc_weight_paths) > 0:\n self.hpc_load(folderpath, self.on_gpu)\n did_restore = True\n return did_restore\n\n def restore_training_state(self, checkpoint):\n \"\"\"\n Restore trainer state.\n Model will get its change to update\n :param checkpoint:\n :return:\n \"\"\"\n if 'optimizer_states' not in checkpoint or 'lr_schedulers' not in checkpoint:\n raise KeyError(\n 'Trying to restore training state but checkpoint contains only the model.'\n ' This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.'\n )\n\n # TODO support more generic way for callbacks to load callback state_dicts\n checkpoint_callbacks = [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]\n early_stopping_callbacks = [c for c in self.callbacks if isinstance(c, EarlyStopping)]\n\n if checkpoint_callbacks:\n if ModelCheckpoint.CHECKPOINT_STATE_BEST_SCORE in checkpoint:\n checkpoint_callbacks[-1].best_model_score = checkpoint[ModelCheckpoint.CHECKPOINT_STATE_BEST_SCORE]\n else:\n # Old naming until version 0.7.6\n rank_zero_warn(\n 'Loading a checkpoint created with an old version of Lightning; '\n 'this will not be supported in the future.'\n )\n checkpoint_callbacks[-1].best_model_score = checkpoint['checkpoint_callback_best']\n checkpoint_callbacks[-1].best_model_path = checkpoint[ModelCheckpoint.CHECKPOINT_STATE_BEST_PATH]\n\n if early_stopping_callbacks:\n state = checkpoint['early_stop_callback_state_dict']\n early_stopping_callbacks[-1].load_state_dict(state)\n\n self.global_step = checkpoint['global_step']\n self.current_epoch = checkpoint['epoch']\n\n # Division deals with global step stepping once per accumulated batch\n # Inequality deals with different global step for odd vs even num_training_batches\n n_accum = 1 if self.accumulate_grad_batches is None else self.accumulate_grad_batches\n expected_steps = self.num_training_batches / n_accum\n if self.num_training_batches != 0 and self.global_step % expected_steps > 1:\n rank_zero_warn(\n \"You're resuming from a checkpoint that ended mid-epoch. \"\n \"This can cause unreliable results if further training is done, \"\n \"consider using an end of epoch checkpoint. 
\"\n )\n\n # restore the optimizers\n optimizer_states = checkpoint['optimizer_states']\n for optimizer, opt_state in zip(self.optimizers, optimizer_states):\n optimizer.load_state_dict(opt_state)\n\n # move optimizer to GPU 1 weight at a time\n # avoids OOM\n if self.root_gpu is not None:\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(self.root_gpu)\n\n # restore the lr schedulers\n lr_schedulers = checkpoint['lr_schedulers']\n for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):\n scheduler['scheduler'].load_state_dict(lrs_state)\n\n # ----------------------------------\n # PRIVATE OPS\n # ----------------------------------\n def hpc_save(self, folderpath: str, logger):\n # make sure the checkpoint folder exists\n folderpath = str(folderpath) # because the tests pass a path object\n if not gfile.exists(folderpath):\n makedirs(folderpath)\n\n # save logger to make sure we get all the metrics\n logger.save()\n\n ckpt_number = self.max_ckpt_in_folder(folderpath) + 1\n\n if not gfile.exists(folderpath):\n makedirs(folderpath)\n filepath = os.path.join(folderpath, f'hpc_ckpt_{ckpt_number}.ckpt')\n\n # give model a chance to do something on hpc_save\n model = self.get_model()\n checkpoint = self.dump_checkpoint()\n\n model.on_hpc_save(checkpoint)\n\n # do the actual save\n # TODO: fix for anything with multiprocess DP, DDP, DDP2\n try:\n self._atomic_save(checkpoint, filepath)\n except AttributeError as err:\n if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:\n del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]\n rank_zero_warn(\n 'warning, `module_arguments` dropped from checkpoint.' f' An attribute is not picklable {err}'\n )\n self._atomic_save(checkpoint, filepath)\n\n return filepath\n\n def hpc_load(self, folderpath, on_gpu):\n filepath = '{}/hpc_ckpt_{}.ckpt'.format(folderpath, self.max_ckpt_in_folder(folderpath))\n\n # load on CPU first\n checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)\n\n # load model state\n model = self.get_model()\n\n # load the state_dict on the model automatically\n model.load_state_dict(checkpoint['state_dict'])\n\n # restore amp scaling\n if self.amp_type == AMPType.NATIVE and 'native_amp_scaling_state' in checkpoint:\n self.scaler.load_state_dict(checkpoint['native_amp_scaling_state'])\n elif self.amp_type == AMPType.APEX and 'amp_scaling_state' in checkpoint:\n amp.load_state_dict(checkpoint['amp_scaling_state'])\n\n if self.root_gpu is not None:\n model.cuda(self.root_gpu)\n\n # load training state (affects trainer only)\n self.restore_training_state(checkpoint)\n\n # call model hook\n model.on_hpc_load(checkpoint)\n\n log.info(f'restored hpc model from: {filepath}')\n\n def max_ckpt_in_folder(self, path, name_key='ckpt_'):\n files = gfile.listdir(str(path))\n files = [x for x in files if name_key in x]\n if len(files) == 0:\n return 0\n\n ckpt_vs = []\n for name in files:\n name = name.split(name_key)[-1]\n name = re.sub('[^0-9]', '', name)\n ckpt_vs.append(int(name))\n\n return max(ckpt_vs)\n" ]
[ [ "torch.save", "torch.cuda.empty_cache", "torch.distributed.barrier", "torch.load" ] ]
anadeba/AV-AMEXPERT
[ "c69b738b6055c2d31b471ebda53bf4599457981e" ]
[ "data_preprocessing2.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\ndata_processed_train = pd.read_csv(r'X:\\Hackathon\\AV - AMEXPERT\\train_amex\\data_processed_train.csv')\r\n\r\ndata_processed_train['DateTime'] = pd.to_datetime(data_processed_train['DateTime'])\r\ndata_processed_train['Dayofweek'] = data_processed_train['DateTime'].apply(lambda x : x.weekday())\r\ndata_processed_train['Hourofday'] = data_processed_train['DateTime'].apply(lambda x : x.hour)\r\n\r\ndata_processed_train.to_csv(r'X:\\Hackathon\\AV - AMEXPERT\\train_amex\\data_processed_train2.csv', index=False)" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
obserthinker/FDTD-CPML
[ "7f2351d0d81b4fcbba9bc6a47d677d4c1f6fccc4" ]
[ "1D/1D/Ex.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\ndz = 0.015\nspace = 30 \ntime = 300 \ndata = np.loadtxt('Ex.txt')\n\nfig = plt.figure()\nax = plt.axes(xlim = (0,1), ylim = (-0.1,1))\nline, = ax.plot([],[],lw = 2)\n\ndef init():\n\tline.set_data([],[])\n\treturn line,\n\ndef animate(i):\n x = np.arange(0 * dz, (space + 1) * dz, dz)\n y = data[i]\n line.set_data(x,y)\n return line,\n\nanim = animation.FuncAnimation(fig, animate, init_func=init, frames=time, interval=50, blit=True)\n\n#anim.save('ba.mp4', fps = 30, extra_args=['-vcodec', 'libx264'])\n\nplt.show()" ]
[ [ "numpy.arange", "matplotlib.pyplot.axes", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
VinceBaz/neuromaps
[ "6758b53e127d1563fa06eb26bc5f08a4e24ae7e7" ]
[ "neuromaps/nulls/tests/test_spins.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nFor testing neuromaps.nulls.spins functionality\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom neuromaps.nulls import spins\n\n\ndef test_load_spins():\n out = np.random.randint(1000, size=(100, 100), dtype='int32')\n assert out is spins.load_spins(out)\n assert np.allclose(out[:, :10], spins.load_spins(out, n_perm=10))\n\n\[email protected]\ndef test_get_parcel_centroids():\n assert False\n\n\[email protected]\ndef test__gen_rotation():\n assert False\n\n\[email protected]\ndef test_gen_spinsamples():\n assert False\n\n\[email protected]\ndef test_spin_parcels():\n assert False\n\n\[email protected]\ndef test_parcels_to_vertices():\n assert False\n\n\[email protected]\ndef test_vertices_to_parcels():\n assert False\n\n\[email protected]\ndef test_spin_data():\n assert False\n" ]
[ [ "numpy.random.randint" ] ]
thomaswmorris/sirepo-bluesky
[ "05be2df43d56f2b5e69fb9511de9a424ad1e8b79" ]
[ "sirepo_bluesky/srw_handler.py" ]
[ "import numpy as np\nimport srwpy.uti_plot_com as srw_io\n\n\ndef read_srw_file(filename, ndim=2):\n data, mode, ranges, labels, units = srw_io.file_load(filename)\n data = np.array(data)\n if ndim == 2:\n data = data.reshape((ranges[8], ranges[5]), order='C')\n photon_energy = ranges[0]\n elif ndim == 1:\n photon_energy = np.linspace(*ranges[:3])\n else:\n raise ValueError(f'The value ndim={ndim} is not supported.')\n\n return {'data': data,\n 'shape': data.shape,\n 'mean': np.mean(data),\n 'photon_energy': photon_energy,\n 'horizontal_extent': ranges[3:5],\n 'vertical_extent': ranges[6:8],\n # 'mode': mode,\n 'labels': labels,\n 'units': units}\n\n\nclass SRWFileHandler:\n specs = {'srw'}\n\n def __init__(self, filename, ndim=2):\n self._name = filename\n self._ndim = ndim\n\n def __call__(self):\n d = read_srw_file(self._name, ndim=self._ndim)\n return d['data']\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.linspace" ] ]
hammond756/redbull-f1
[ "70fe15356b7798f11b8c7eafe1e6b432d7ccc546" ]
[ "models/simple_network.py" ]
[ "import os, sys, math\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable as var\nimport matplotlib.pyplot as plt\n\n\ndef train_model():\n dataset = TorcsDataLoader.TorcsTrackDataset(['aalborg.csv', 'alpine-1.csv', 'f-speedway.csv'])\n x = var(torch.FloatTensor(dataset.carstates))\n y = var(torch.FloatTensor(dataset.targets), requires_grad=False)\n\n # x = var(torch.from_numpy(x_train).type(torch.FloatTensor), requires_grad=True)\n # y = var(torch.from_numpy(y_train).type(torch.FloatTensor), requires_grad=False)\n\n model = SimpleNetwork(dataset.x_dim, 13, 1)\n loss_fn = torch.nn.MSELoss(size_average=False)\n learning_rate = 1e-7\n\n for t in range(1000):\n # Forward pass: compute predicted y by passing x to the model. Module objects\n # override the __call__ operator so you can call them like functions. When\n # doing so you pass a Variable of input data to the Module and it produces\n # a Variable of output data.\n y_pred = model(x)\n\n # Compute and print loss. We pass Variables containing the predicted and true\n # values of y, and the loss function returns a Variable containing the\n # loss.\n loss = loss_fn(y_pred, y)\n # Zero the gradients before running the backward pass.\n model.zero_grad()\n\n # Backward pass: compute gradient of the loss with respect to all the learnable\n # parameters of the model. Internally, the parameters of each Module are stored\n # in Variables with requires_grad=True, so this call will compute gradients for\n # all learnable parameters in the model.\n loss.backward()\n\n # torch.nn.utils.clip_grad_norm(model.parameters(), 0.25)\n\n # Update the weights using gradient descent. Each parameter is a Variable, so\n # we can access its data and gradients like we did before.\n for param in model.parameters():\n param.data -= learning_rate * param.grad.data\n\n if t % 100 == 0:\n print(\"epoch\", t, \"total loss\", loss.data)\n\n TorcsDataLoader.save_parameters(model, 'steering-all')\n return\n\nclass SimpleNetwork(nn.Module):\n def __init__(self, in_dim, hidden_units, out_dim):\n super(SimpleNetwork, self).__init__()\n self.in_dim = in_dim\n self.hidden_units = hidden_units\n self.out_dim = out_dim\n\n self.lin1 = torch.nn.Linear(in_dim, hidden_units)\n self.lin2 = torch.nn.Linear(hidden_units, out_dim)\n\n def forward(self, inputs):\n out = self.lin1(inputs)\n # out = F.sigmoid(out)\n out = self.lin2(out)\n out = F.tanh(out)\n return out\n\n def get_n_units(self):\n return (self.in_dim, self.hidden_units, self.out_dim)\n\nif __name__ == '__main__':\n import TorcsDataLoader\n train_model()\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.tanh", "torch.FloatTensor", "torch.nn.MSELoss" ] ]
gfwm0502/Paddle
[ "cbce0e603ac2900258dfe29218860f30448aa53e" ]
[ "python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/sharding_optimizer_stage2.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#Taken and modified for fairscale from:\n# https://github.com/facebookresearch/fairscale/blob/main/fairscale/optim/oss.py\n#Commit: 8acbec718f3c70a6b9785470bb9e05cd84fc3f8e\n\nimport copy\nimport logging\nimport numpy as np\nfrom itertools import chain\nfrom functools import reduce\nfrom collections import OrderedDict\n\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import core\nimport paddle.distributed as dist\nfrom paddle.optimizer import Optimizer\nfrom paddle.fluid.clip import ClipGradByGlobalNorm\nfrom paddle.distributed.collective import _get_global_group\n\nfrom ...utils.internal_storage import ParamStorage, GradStorage\nfrom ...meta_parallel.sharding.sharding_utils import Type, device_guard, ShardingClipGrad\n\n# CUDA alignment 256 bytes, cpu alignment 4096 bytes\nalignment = {\"gpu\": 256, \"cpu\": 4096}\nalign = {\n Type.fp16.value: 2,\n Type.fp32.value: 4,\n}\n\n__all__ = [\"ShardingOptimizerStage2\"]\n\n\nclass ShardingOptimizerStage2(Optimizer):\n \"\"\"\n A wrapper for Sharding Stage2 Optimizer in Dygraph. \n\n .. warning: ShardingOptimizer encapsulates the optimization strategy and integrates it into the optimizer.\n\n .. ZeRO: 1.https://arxiv.org/pdf/1910.02054.pdf 2.https://arxiv.org/pdf/1910.02054.pdf.\n\n \"\"\"\n\n # TODO (Baibaifan) \n # Feature Notes:\n # 1. Unified memory for parameters and parameters.grad to InternalStorage.\n # 2. Support the segmentation of optimizer parameters and partial updating of parameters.\n # 3. Dynamically adjust training parameters and models.\n # 4. Support offload function.\n # 5. Support the establishment of independent communication groups.\n # 6. 
Broadcast_fp16 is not supported now.\n def __init__(self,\n params,\n optim,\n group=None,\n offload=False,\n device=\"gpu\",\n pertrain_sync_models=True,\n **kw):\n\n super().__init__(optim._learning_rate, params, kw)\n\n # Segmentation information\n self._dtype_rank_params = OrderedDict(\n ) # {dtype:[param1,param2]} device, rank, params\n self._param2rank = {}\n self.__segment_params = []\n self._rank_buffer_size = {} # {dtype: {rank: numel+alignment}}\n self._param2align = {} # {param.name: align}\n\n # Default information\n self._optim_defaults = kw\n self._optim = optim\n\n assert hasattr(self._optim, \"_master_weights\"\n ), \"Must use optimizer with _master_weights attribute\"\n self._local_params = params\n self._default_device = device\n self._pfp16 = len(\n list(\n filter(lambda x: x.trainable and x.dtype == Type.fp16.value,\n self._local_params))) > 0\n\n self.group = dist.new_group(_get_global_group()\n .ranks) if group is None else group\n\n self.world_size = self.group.nranks\n self.rank = self.group.rank\n self._global_root_rank = 0\n\n # Synchronous all ranks models\n if pertrain_sync_models:\n self._sync_params_and_buffers()\n\n self.param_storages = {} # {dtype: {rank: InternalStorage}}\n\n if isinstance(self._optim._grad_clip, ClipGradByGlobalNorm):\n logging.warning(\n \"While using ClipGradByGlobalNorm in ShardingOptimizer, the grad clip of original optimizer will be changed.\"\n )\n self._optim._grad_clip = ShardingClipGrad(self._optim._grad_clip,\n paddle.get_device(),\n self.group)\n if self._optim._parameter_list and isinstance(\n self._optim._parameter_list[0], dict):\n for item in self._optim._param_groups:\n if \"grad_clip\" in item.keys():\n item[\"grad_clip\"] = ShardingClipGrad(\n self._optim._grad_clip,\n paddle.get_device(), self.group)\n\n if offload:\n assert self._pfp16, \"Only support offload strategy while using \\'Adam\\', \\'AdamW\\' and \\'Momentum\\' optimizer with AMP/Pure FP16\"\n\n self.offload = offload # Using for offload\n self.offload_device = \"cpu\"\n self.offload_buffer_size = 0\n self.offload_param2align = {}\n self.offload_params = None\n self.offload_grads = None\n\n self._master_params = {}\n\n # Update optimizer parameters and adjust parameter storage and use according to rank.\n self._update_opt_status()\n\n @paddle.no_grad()\n def _sync_params_and_buffers(self):\n \"\"\"\n Sync all model states for all ranks\n \"\"\"\n\n for p in self._local_params:\n dist.broadcast(\n p,\n src=self._global_root_rank,\n group=self.group,\n use_calc_stream=True)\n\n # Multi stream operation will be supported later\n dist.wait(tensor=p, group=self.group, use_calc_stream=True)\n\n def _generate_master_params(self, trainable_params):\n if self.offload:\n for param in trainable_params:\n if param.name not in self._master_params.keys():\n self._master_params[param.name] = core.VarBase(\n name=param.name,\n value=param.cast(dtype=Type.fp32.value).numpy(),\n place=core.CPUPlace(),\n stop_gradient=param.stop_gradient)\n else:\n for param in trainable_params:\n if param.dtype == Type.fp16.value:\n self._optim._master_weights[param.name] = paddle.cast(\n param, Type.fp32.value)\n\n def _update_opt_status(self):\n \"\"\"Update optimizer status and parameter storage information, and special functions to be developed.\n \"\"\"\n # func 1\n self._integration_params()\n\n # fun 2 TODO\n\n # Segement helpers\n\n def _segment_params(self):\n \"\"\"\n Divide all optimizer parameters equally into rank.\n \"\"\"\n if len(self.__segment_params) == 0:\n 
self.__segment_params, param_lists = [\n [] for _ in range(self.world_size)\n ], [[] for _ in range(self.world_size)]\n sizes = [0] * self.world_size\n for param in self._local_params:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n\n # Statistical real numels\n sizes[rank] += np.prod(param.shape) if param.trainable else 0\n\n for rank, params in enumerate(param_lists):\n self.__segment_params[rank].extend(params)\n return self.__segment_params\n\n @property\n def local_params(self):\n return self._local_params\n\n @property\n def param2rank(self):\n \"\"\"Map the params to the rank which owns them\"\"\"\n if len(self._param2rank) == 0:\n for rank, params in enumerate(self._segment_params()):\n for param in params:\n self._param2rank[param.name] = rank\n return self._param2rank\n\n @property\n def dtype_rank_params(self):\n \"\"\"\n Divide the parameters into groups according to rank and dtype.\n \"\"\"\n if len(self._dtype_rank_params) == 0:\n # Assign the parameters of each rank according to the type\n for param in self._local_params:\n if param.dtype not in self._dtype_rank_params.keys():\n self._dtype_rank_params[\n param.dtype] = [[] for _ in range(self.world_size)]\n self._dtype_rank_params[param.dtype][self.param2rank[\n param.name]].append(param)\n\n # Sort per rank params by size\n for dtype in self._dtype_rank_params.keys():\n for rank_params in self._dtype_rank_params[dtype]:\n rank_params.sort(key=lambda x: np.prod(x.shape))\n\n return self._dtype_rank_params\n\n @property\n def rank_buffer_size(self):\n \"\"\"\n Count the memory size of the parameters corresponding to rank under the corresponding dtype.\n \"\"\"\n # CUDA alignment 256 bytes\n if len(self._rank_buffer_size) == 0:\n for dtype in self.dtype_rank_params.keys():\n if dtype not in self._rank_buffer_size.keys():\n self._rank_buffer_size[dtype] = {}\n for dst_rank, per_rank_params in enumerate(\n self.dtype_rank_params[dtype]):\n if dst_rank not in self._rank_buffer_size[dtype].keys():\n self._rank_buffer_size[dtype][dst_rank] = 0\n for param in per_rank_params:\n if not param.trainable:\n continue\n size = np.prod(param.shape) * align[dtype]\n remaining = size % alignment[self._default_device]\n ali = 0 if remaining == 0 else alignment[\n self._default_device] - remaining\n align_ = ali // align[dtype]\n self._rank_buffer_size[dtype][dst_rank] += np.prod(\n param.shape) + align_\n self._param2align[param.name] = align_\n\n return self._rank_buffer_size\n\n def _integration_params(self):\n \"\"\"\n Integrate the parameters into a continuous memory according to rank, and support the update of training parameters.\n \"\"\"\n\n for dtype, per_rank_params in self.dtype_rank_params.items():\n if dtype not in self.param_storages.keys():\n self.param_storages[dtype] = {}\n\n for dst_rank, params in enumerate(per_rank_params):\n if len(params) > 0:\n\n # Merge all the trainable params in a single InternalStorage\n trainable_params = list(\n filter(lambda x: x.trainable, params))\n if self._pfp16 and dst_rank == self.rank:\n self._generate_master_params(trainable_params)\n if trainable_params:\n param_storage = ParamStorage(\n size=self.rank_buffer_size[dtype][dst_rank],\n dtype=dtype,\n device=self._default_device)\n\n param_storage.add_rank_params(trainable_params,\n self._param2align)\n self.param_storages[dtype][dst_rank] = param_storage\n\n # Clear the InternalStorage keys which are not in use anymore\n dtype_in_use = list(self.dtype_rank_params.keys())\n 
dtype_to_pop = list(\n filter(lambda x: x not in dtype_in_use, self.param_storages.keys()))\n for d in dtype_to_pop:\n self.param_storages.pop(d)\n\n if self.offload:\n self._optim._master_weights = self._master_params\n cpu_master_params = [p for p in self._master_params.values()]\n for param in cpu_master_params:\n size = np.prod(param.shape) * align[Type.fp32.value]\n remaining = size % alignment[self.offload_device]\n ali = 0 if remaining == 0 else alignment[\n self.offload_device] - remaining\n align_ = ali // align[Type.fp32.value]\n self.offload_buffer_size += np.prod(param.shape) + align_\n self.offload_param2align[param.name] = align_\n\n if cpu_master_params:\n with device_guard(self.rank, self.offload_device):\n self.offload_params = ParamStorage(\n size=self.offload_buffer_size,\n dtype=Type.fp32.value,\n device=self.offload_device)\n self.offload_params.add_rank_params(\n cpu_master_params, self.offload_param2align, False)\n self.offload_params.buffer.stop_gradient = False\n\n self.offload_grads = GradStorage(\n size=self.offload_buffer_size,\n dtype=Type.fp32.value,\n device=self.offload_device,\n destination=self.rank,\n parm2align=self.offload_param2align,\n convert_cpu=True)\n for p in cpu_master_params:\n self.offload_grads.add_grad(\n p, self.offload_param2align[p.name])\n\n self._optim._master_weights[\n self.offload_params.buffer.\n name] = self.offload_params.buffer\n\n def _offload_acc_grad(self, param_name, grad_fp32_cpu):\n \"\"\"accumulate grads with offload strategy\"\"\"\n with device_guard(self.rank, self.offload_device):\n if param_name in self._master_params.keys():\n if self._master_params[param_name].grad is None:\n self._master_params[param_name]._copy_gradient_from(\n grad_fp32_cpu)\n else:\n self._master_params[param_name].grad.add_(grad_fp32_cpu)\n\n self.offload_params.buffer._copy_gradient_from(\n self.offload_grads.buffer)\n\n def _offload_scale_grad(self, scale_size):\n \"\"\"scale grads with offload strategy\"\"\"\n with device_guard(self.rank, self.offload_device):\n self.offload_grads.buffer.scale_(scale=scale_size)\n\n def _offload_clear_grad(self):\n \"\"\"clear grads with offload strategy\"\"\"\n with device_guard(self.rank, self.offload_device):\n self.offload_grads.buffer.zero_()\n\n def step(self):\n \"\"\"\n A wrapper for Optimizer's step function to finish the update operation of the optimizer.\n \"\"\"\n\n if self.offload:\n params_list = [self.offload_params.buffer]\n\n #TODO(Baibaifan): Offload will support param_groups later\n if not isinstance(self._optim._param_groups[0], dict):\n self._optim._parameter_list = params_list\n self._optim._param_groups = params_list\n\n # Run the optimizer of the current rank step\n if self.offload:\n with device_guard(device=self.offload_device):\n self._optim.step()\n\n dev_id = int(paddle.get_device().split(\":\")[1])\n for param in self._local_params:\n if param.name in self._master_params.keys():\n param.set_value(self._master_params[param.name].cuda(dev_id)\n .cast(dtype=param.dtype))\n else:\n self._optim.step()\n\n # Synchronize all the updated shards in between the ranks\n self._broadcast_params()\n\n def minimize(self):\n raise RuntimeError(\n \"optimizer.minimize() not support now, please use optimizer.step()\")\n\n def _clear_cache(self):\n self.__segment_params.clear()\n self._dtype_rank_params.clear()\n self._param2rank.clear()\n\n @fluid.dygraph.no_grad\n def _broadcast_params(self):\n \"\"\"Broadcast the parameters of the current rank to each rank\"\"\"\n\n assert self._default_device 
== \"gpu\", \"Only supported gpu\"\n\n # Exchange all the shards with the other ranks\n for dtype_per_rank in self.param_storages.values():\n for dst_rank, internal_storage in dtype_per_rank.items():\n dist.broadcast(\n tensor=internal_storage.buffer,\n src=dst_rank,\n group=self.group,\n use_calc_stream=True)\n\n # Multi stream operation will be supported later\n dist.wait(\n tensor=internal_storage.buffer,\n group=self.group,\n use_calc_stream=True)\n" ]
[ [ "numpy.prod" ] ]
mawright/flow
[ "6e3f3da04b289a3f9e754c84915b60f0689dc78d" ]
[ "flow/envs/base_env.py" ]
[ "\"\"\"Base environment class. This is the parent of all other environments.\"\"\"\n\nfrom copy import deepcopy\nimport os\nimport atexit\nimport time\nimport traceback\nimport numpy as np\nimport random\nfrom flow.renderer.pyglet_renderer import PygletRenderer as Renderer\n\nimport gym\nfrom gym.spaces import Box\nfrom traci.exceptions import FatalTraCIError\nfrom traci.exceptions import TraCIException\n\nimport sumolib\n\ntry:\n # Import serializable if rllab is installed\n from rllab.core.serializable import Serializable\n serializable_flag = True\nexcept ImportError:\n serializable_flag = False\n\nfrom flow.core.util import ensure_dir\nfrom flow.core.kernel import Kernel\nfrom flow.utils.exceptions import FatalFlowError\n\n# pick out the correct class definition\nif serializable_flag:\n classdef = (gym.Env, Serializable)\nelse:\n classdef = (gym.Env,)\n\n\nclass Env(*classdef):\n \"\"\"Base environment class.\n\n Provides the interface for controlling a SUMO simulation. Using this\n class, you can start sumo, provide a scenario to specify a\n configuration and controllers, perform simulation steps, and reset the\n simulation to an initial configuration.\n\n Env is Serializable to allow for pickling and replaying of the policy.\n\n This class cannot be used as is: you must extend it to implement an\n action applicator method, and properties to define the MDP if you\n choose to use it with an rl library (e.g. RLlib). This can be done by\n overloading the following functions in a child class:\n - action_space\n - observation_space\n - apply_rl_action\n - get_state\n - compute_reward\n\n Attributes\n ----------\n env_params : flow.core.params.EnvParams\n see flow/core/params.py\n sim_params : flow.core.params.SimParams\n see flow/core/params.py\n scenario : flow.scenarios.Scenario\n see flow/scenarios/base_scenario.py\n simulator : str\n the simulator used, one of {'traci', 'aimsun'}. Defaults to 'traci'\n \"\"\"\n\n def __init__(self, env_params, sim_params, scenario, simulator='traci'):\n # Invoke serializable if using rllab\n if serializable_flag:\n Serializable.quick_init(self, locals())\n\n self.env_params = env_params\n self.scenario = scenario\n self.sim_params = sim_params\n time_stamp = ''.join(str(time.time()).split('.'))\n if os.environ.get(\"TEST_FLAG\", 0):\n # 1.0 works with stress_test_start 10k times\n time.sleep(1.0 * int(time_stamp[-6:]) / 1e6)\n # FIXME: this is sumo-specific\n self.sim_params.port = sumolib.miscutils.getFreeSocketPort()\n # time_counter: number of steps taken since the start of a rollout\n self.time_counter = 0\n # step_counter: number of total steps taken\n self.step_counter = 0\n # initial_state:\n # Key = Vehicle ID,\n # Entry = (type_id, route_id, lane_index, lane_pos, speed, pos)\n self.initial_state = {}\n self.state = None\n self.obs_var_labels = []\n\n # simulation step size\n self.sim_step = sim_params.sim_step\n\n # the simulator used by this environment\n self.simulator = simulator\n\n # create the Flow kernel\n self.k = Kernel(simulator=self.simulator,\n sim_params=sim_params)\n\n # use the scenario class's network parameters to generate the necessary\n # scenario components within the scenario kernel\n self.k.scenario.generate_network(scenario)\n\n # initial the vehicles kernel using the VehicleParams object\n self.k.vehicle.initialize(deepcopy(scenario.vehicles))\n\n # initialize the simulation using the simulation kernel. 
This will use\n # the scenario kernel as an input in order to determine what network\n # needs to be simulated.\n kernel_api = self.k.simulation.start_simulation(\n scenario=self.k.scenario, sim_params=sim_params)\n\n # pass the kernel api to the kernel and it's subclasses\n self.k.pass_api(kernel_api)\n\n # the available_routes variable contains a dictionary of routes\n # vehicles can traverse; to be used when routes need to be chosen\n # dynamically\n self.available_routes = self.k.scenario.rts\n\n # store the initial vehicle ids\n self.initial_ids = deepcopy(scenario.vehicles.ids)\n\n # store the initial state of the vehicles kernel (needed for restarting\n # the simulation)\n self.k.vehicle.kernel_api = None\n self.k.vehicle.master_kernel = None\n self.initial_vehicles = deepcopy(self.k.vehicle)\n self.k.vehicle.kernel_api = self.k.kernel_api\n self.k.vehicle.master_kernel = self.k\n\n self.setup_initial_state()\n\n # use pyglet to render the simulation\n if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:\n save_render = self.sim_params.save_render\n sight_radius = self.sim_params.sight_radius\n pxpm = self.sim_params.pxpm\n show_radius = self.sim_params.show_radius\n\n # get network polygons\n network = []\n # FIXME: add to scenario kernel instead of hack\n for lane_id in self.k.kernel_api.lane.getIDList():\n _lane_poly = self.k.kernel_api.lane.getShape(lane_id)\n lane_poly = [i for pt in _lane_poly for i in pt]\n network.append(lane_poly)\n\n # instantiate a pyglet renderer\n self.renderer = Renderer(\n network,\n self.sim_params.render,\n save_render,\n sight_radius=sight_radius,\n pxpm=pxpm,\n show_radius=show_radius)\n\n # render a frame\n self.render(reset=True)\n elif self.sim_params.render in [True, False]:\n pass # default to sumo-gui (if True) or sumo (if False)\n else:\n raise ValueError('Mode %s is not supported!' %\n self.sim_params.render)\n atexit.register(self.terminate)\n\n def restart_simulation(self, sim_params, render=None):\n \"\"\"Restart an already initialized simulation instance.\n\n This is used when visualizing a rollout, in order to update the\n rendering with potentially a gui and export emission data from sumo.\n\n This is also used to handle cases when the runtime of an experiment is\n too long, causing the sumo instance\n\n Parameters\n ----------\n sim_params : flow.core.params.SimParams\n simulation-specific parameters\n render: bool, optional\n specifies whether to use the gui\n \"\"\"\n self.k.close()\n\n # killed the sumo process if using sumo/TraCI\n if self.simulator == 'traci':\n self.k.simulation.sumo_proc.kill()\n\n if render is not None:\n self.sim_params.render = render\n\n if sim_params.emission_path is not None:\n ensure_dir(sim_params.emission_path)\n self.sim_params.emission_path = sim_params.emission_path\n\n self.k.scenario.generate_network(self.scenario)\n self.k.vehicle.initialize(deepcopy(self.scenario.vehicles))\n kernel_api = self.k.simulation.start_simulation(\n scenario=self.k.scenario, sim_params=self.sim_params)\n self.k.pass_api(kernel_api)\n\n self.setup_initial_state()\n\n def setup_initial_state(self):\n \"\"\"Store information on the initial state of vehicles in the network.\n\n This information is to be used upon reset. 
This method also adds this\n information to the self.vehicles class and starts a subscription with\n sumo to collect state information each step.\n \"\"\"\n # determine whether to shuffle the vehicles\n if self.scenario.initial_config.shuffle:\n random.shuffle(self.initial_ids)\n\n # generate starting position for vehicles in the network\n start_pos, start_lanes = self.k.scenario.generate_starting_positions(\n initial_config=self.scenario.initial_config,\n num_vehicles=len(self.initial_ids))\n\n # save the initial state. This is used in the _reset function\n for i, veh_id in enumerate(self.initial_ids):\n type_id = self.scenario.vehicles.get_type(veh_id)\n pos = start_pos[i][1]\n lane = start_lanes[i]\n speed = self.scenario.vehicles.get_initial_speed(veh_id)\n edge = start_pos[i][0]\n\n self.initial_state[veh_id] = (type_id, edge, lane, pos, speed)\n\n def step(self, rl_actions):\n \"\"\"Advance the environment by one step.\n\n Assigns actions to autonomous and human-driven agents (i.e. vehicles,\n traffic lights, etc...). Actions that are not assigned are left to the\n control of the simulator. The actions are then used to advance the\n simulator by the number of time steps requested per environment step.\n\n Results from the simulations are processed through various classes,\n such as the Vehicle and TrafficLight kernels, to produce standardized\n methods for identifying specific network state features. Finally,\n results from the simulator are used to generate appropriate\n observations.\n\n Parameters\n ----------\n rl_actions: numpy ndarray\n an list of actions provided by the rl algorithm\n\n Returns\n -------\n observation: numpy ndarray\n agent's observation of the current environment\n reward: float\n amount of reward associated with the previous state/action pair\n done: bool\n indicates whether the episode has ended\n info: dict\n contains other diagnostic information from the previous action\n \"\"\"\n for _ in range(self.env_params.sims_per_step):\n self.time_counter += 1\n self.step_counter += 1\n\n # perform acceleration actions for controlled human-driven vehicles\n if len(self.k.vehicle.get_controlled_ids()) > 0:\n accel = []\n for veh_id in self.k.vehicle.get_controlled_ids():\n action = self.k.vehicle.get_acc_controller(\n veh_id).get_action(self)\n accel.append(action)\n self.k.vehicle.apply_acceleration(\n self.k.vehicle.get_controlled_ids(), accel)\n\n # perform lane change actions for controlled human-driven vehicles\n if len(self.k.vehicle.get_controlled_lc_ids()) > 0:\n direction = []\n for veh_id in self.k.vehicle.get_controlled_lc_ids():\n target_lane = self.k.vehicle.get_lane_changing_controller(\n veh_id).get_action(self)\n direction.append(target_lane)\n self.k.vehicle.apply_lane_change(\n self.k.vehicle.get_controlled_lc_ids(),\n direction=direction)\n\n # perform (optionally) routing actions for all vehicles in the\n # network, including RL and SUMO-controlled vehicles\n routing_ids = []\n routing_actions = []\n for veh_id in self.k.vehicle.get_ids():\n if self.k.vehicle.get_routing_controller(veh_id) \\\n is not None:\n routing_ids.append(veh_id)\n route_contr = self.k.vehicle.get_routing_controller(\n veh_id)\n routing_actions.append(route_contr.choose_route(self))\n\n self.k.vehicle.choose_routes(routing_ids, routing_actions)\n\n self.apply_rl_actions(rl_actions)\n\n self.additional_command()\n\n # advance the simulation in the simulator by one step\n self.k.simulation.simulation_step()\n\n # store new observations in the vehicles and traffic lights 
class\n self.k.update(reset=False)\n\n # update the colors of vehicles\n if self.sim_params.render:\n self.k.vehicle.update_vehicle_colors()\n\n # crash encodes whether the simulator experienced a collision\n crash = self.k.simulation.check_collision()\n\n # stop collecting new simulation steps if there is a collision\n if crash:\n break\n\n # render a frame\n self.render()\n\n states = self.get_state()\n\n # collect information of the state of the network based on the\n # environment class used\n self.state = np.asarray(states).T\n\n # collect observation new state associated with action\n next_observation = np.copy(states)\n\n # test if the agent should terminate due to a crash\n done = crash or self.time_counter >= self.env_params.horizon * self.env_params.sims_per_step\n\n # compute the info for each agent\n infos = {}\n\n # compute the reward\n rl_clipped = self.clip_actions(rl_actions)\n reward = self.compute_reward(rl_clipped, fail=crash)\n\n return next_observation, reward, done, infos\n\n def reset(self):\n \"\"\"Reset the environment.\n\n This method is performed in between rollouts. It resets the state of\n the environment, and re-initializes the vehicles in their starting\n positions.\n\n If \"shuffle\" is set to True in InitialConfig, the initial positions of\n vehicles is recalculated and the vehicles are shuffled.\n\n Returns\n -------\n observation: numpy ndarray\n the initial observation of the space. The initial reward is assumed\n to be zero.\n \"\"\"\n # reset the time counter\n self.time_counter = 0\n\n # warn about not using restart_instance when using inflows\n if len(self.scenario.net_params.inflows.get()) > 0 and \\\n not self.sim_params.restart_instance:\n print(\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"WARNING: Inflows will cause computational performance to\\n\"\n \"significantly decrease after large number of rollouts. 
In \\n\"\n \"order to avoid this, set SumoParams(restart_instance=True).\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\\n\"\n \"**********************************************************\"\n )\n\n if self.sim_params.restart_instance or \\\n (self.step_counter > 2e6 and self.simulator != 'aimsun'):\n self.step_counter = 0\n # issue a random seed to induce randomness into the next rollout\n self.sim_params.seed = random.randint(0, 1e5)\n\n self.k.vehicle = deepcopy(self.initial_vehicles)\n self.k.vehicle.master_kernel = self.k\n # restart the sumo instance\n self.restart_simulation(self.sim_params)\n\n # perform shuffling (if requested)\n elif self.scenario.initial_config.shuffle:\n self.setup_initial_state()\n\n # clear all vehicles from the network and the vehicles class\n if self.simulator == 'traci':\n for veh_id in self.k.kernel_api.vehicle.getIDList(): # FIXME: hack\n try:\n self.k.vehicle.remove(veh_id)\n except (FatalTraCIError, TraCIException):\n pass\n\n # clear all vehicles from the network and the vehicles class\n # FIXME (ev, ak) this is weird and shouldn't be necessary\n for veh_id in list(self.k.vehicle.get_ids()):\n # do not try to remove the vehicles from the network in the first\n # step after initializing the network, as there will be no vehicles\n if self.step_counter == 0:\n continue\n try:\n self.k.vehicle.remove(veh_id)\n except (FatalTraCIError, TraCIException):\n print(\"Error during start: {}\".format(traceback.format_exc()))\n\n # reintroduce the initial vehicles to the network\n for veh_id in self.initial_ids:\n type_id, edge, lane_index, pos, speed = \\\n self.initial_state[veh_id]\n\n try:\n self.k.vehicle.add(\n veh_id=veh_id,\n type_id=type_id,\n edge=edge,\n lane=lane_index,\n pos=pos,\n speed=speed)\n except (FatalTraCIError, TraCIException):\n # if a vehicle was not removed in the first attempt, remove it\n # now and then reintroduce it\n self.k.vehicle.remove(veh_id)\n if self.simulator == 'traci':\n self.k.kernel_api.vehicle.remove(veh_id) # FIXME: hack\n self.k.vehicle.add(\n veh_id=veh_id,\n type_id=type_id,\n edge=edge,\n lane=lane_index,\n pos=pos,\n speed=speed)\n\n # advance the simulation in the simulator by one step\n self.k.simulation.simulation_step()\n\n # update the information in each kernel to match the current state\n self.k.update(reset=True)\n\n # update the colors of vehicles\n if self.sim_params.render:\n self.k.vehicle.update_vehicle_colors()\n\n # check to make sure all vehicles have been spawned\n if len(self.initial_ids) > self.k.vehicle.num_vehicles:\n missing_vehicles = list(\n set(self.initial_ids) - set(self.k.vehicle.get_ids()))\n msg = '\\nNot enough vehicles have spawned! 
Bad start?\\n' \\\n 'Missing vehicles / initial state:\\n'\n for veh_id in missing_vehicles:\n msg += '- {}: {}\\n'.format(veh_id, self.initial_state[veh_id])\n raise FatalFlowError(msg=msg)\n\n states = self.get_state()\n\n # collect information of the state of the network based on the\n # environment class used\n self.state = np.asarray(states).T\n\n # observation associated with the reset (no warm-up steps)\n observation = np.copy(states)\n\n # perform (optional) warm-up steps before training\n for _ in range(self.env_params.warmup_steps):\n observation, _, _, _ = self.step(rl_actions=None)\n\n # render a frame\n self.render(reset=True)\n\n return observation\n\n def additional_command(self):\n \"\"\"Additional commands that may be performed by the step method.\"\"\"\n pass\n\n def clip_actions(self, rl_actions=None):\n \"\"\"Clip the actions passed from the RL agent.\n\n Parameters\n ----------\n rl_actions : list or numpy ndarray\n list of actions provided by the RL algorithm\n\n Returns\n -------\n numpy ndarray (float)\n The rl_actions clipped according to the box\n \"\"\"\n # ignore if no actions are issued\n if rl_actions is None:\n return None\n\n # clip according to the action space requirements\n if isinstance(self.action_space, Box):\n rl_actions = np.clip(\n rl_actions,\n a_min=self.action_space.low,\n a_max=self.action_space.high)\n return rl_actions\n\n def apply_rl_actions(self, rl_actions=None):\n \"\"\"Specify the actions to be performed by the rl agent(s).\n\n If no actions are provided at any given step, the rl agents default to\n performing actions specified by SUMO.\n\n Parameters\n ----------\n rl_actions : list or numpy ndarray\n list of actions provided by the RL algorithm\n \"\"\"\n # ignore if no actions are issued\n if rl_actions is None:\n return\n\n rl_clipped = self.clip_actions(rl_actions)\n self._apply_rl_actions(rl_clipped)\n\n def _apply_rl_actions(self, rl_actions):\n raise NotImplementedError\n\n def get_state(self):\n \"\"\"Return the state of the simulation as perceived by the RL agent.\n\n MUST BE implemented in new environments.\n\n Returns\n -------\n state: numpy ndarray\n information on the state of the vehicles, which is provided to the\n agent\n \"\"\"\n raise NotImplementedError\n\n @property\n def action_space(self):\n \"\"\"Identify the dimensions and bounds of the action space.\n\n MUST BE implemented in new environments.\n\n Returns\n -------\n gym Box or Tuple type\n a bounded box depicting the shape and bounds of the action space\n \"\"\"\n raise NotImplementedError\n\n @property\n def observation_space(self):\n \"\"\"Identify the dimensions and bounds of the observation space.\n\n MUST BE implemented in new environments.\n\n Returns\n -------\n gym Box or Tuple type\n a bounded box depicting the shape and bounds of the observation\n space\n \"\"\"\n raise NotImplementedError\n\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"Reward function for the RL agent(s).\n\n MUST BE implemented in new environments.\n Defaults to 0 for non-implemented environments.\n\n Parameters\n ----------\n rl_actions: numpy ndarray\n actions performed by rl vehicles\n kwargs: dict\n other parameters of interest. Contains a \"fail\" element, which\n is True if a vehicle crashed, and False otherwise\n\n Returns\n -------\n reward: float or list <float>\n \"\"\"\n return 0\n\n def terminate(self):\n \"\"\"Close the TraCI I/O connection.\n\n Should be done at end of every experiment. 
Must be in Env because the\n environment opens the TraCI connection.\n \"\"\"\n try:\n print(\n \"Closing connection to TraCI and stopping simulation.\\n\"\n \"Note, this may print an error message when it closes.\"\n )\n self.k.close()\n\n # close pyglet renderer\n if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:\n self.renderer.close()\n except FileNotFoundError:\n print(\"Skip automatic termination. \"\n \"Connection is probably already closed.\")\n\n def render(self, reset=False, buffer_length=5):\n \"\"\"Render a frame.\n\n Parameters\n ----------\n reset: bool\n set to True to reset the buffer\n buffer_length: int\n length of the buffer\n \"\"\"\n if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:\n # render a frame\n self.pyglet_render()\n\n # cache rendering\n if reset:\n self.frame_buffer = [self.frame.copy() for _ in range(5)]\n self.sights_buffer = [self.sights.copy() for _ in range(5)]\n else:\n if self.step_counter % int(1/self.sim_step) == 0:\n self.frame_buffer.append(self.frame.copy())\n self.sights_buffer.append(self.sights.copy())\n if len(self.frame_buffer) > buffer_length:\n self.frame_buffer.pop(0)\n self.sights_buffer.pop(0)\n\n def pyglet_render(self):\n \"\"\"Render a frame using pyglet.\"\"\"\n\n # get human and RL simulation status\n human_idlist = self.k.vehicle.get_human_ids()\n machine_idlist = self.k.vehicle.get_rl_ids()\n human_logs = []\n human_orientations = []\n human_dynamics = []\n machine_logs = []\n machine_orientations = []\n machine_dynamics = []\n max_speed = self.k.scenario.max_speed()\n for id in human_idlist:\n # Force tracking human vehicles by adding \"track\" in vehicle id.\n # The tracked human vehicles will be treated as machine vehicles.\n if 'track' in id:\n machine_logs.append(\n [self.k.vehicle.get_timestep(id),\n self.k.vehicle.get_timedelta(id),\n id])\n machine_orientations.append(\n self.k.vehicle.get_orientation(id))\n machine_dynamics.append(\n self.k.vehicle.get_speed(id)/max_speed)\n else:\n human_logs.append(\n [self.k.vehicle.get_timestep(id),\n self.k.vehicle.get_timedelta(id),\n id])\n human_orientations.append(\n self.k.vehicle.get_orientation(id))\n human_dynamics.append(\n self.k.vehicle.get_speed(id)/max_speed)\n for id in machine_idlist:\n machine_logs.append(\n [self.k.vehicle.get_timestep(id),\n self.k.vehicle.get_timedelta(id),\n id])\n machine_orientations.append(\n self.k.vehicle.get_orientation(id))\n machine_dynamics.append(\n self.k.vehicle.get_speed(id)/max_speed)\n\n # step the renderer\n self.frame = self.renderer.render(human_orientations,\n machine_orientations,\n human_dynamics,\n machine_dynamics,\n human_logs,\n machine_logs)\n\n # get local observation of RL vehicles\n self.sights = []\n for id in human_idlist:\n # Force tracking human vehicles by adding \"track\" in vehicle id.\n # The tracked human vehicles will be treated as machine vehicles.\n if \"track\" in id:\n orientation = self.k.vehicle.get_orientation(id)\n sight = self.renderer.get_sight(\n orientation, id)\n self.sights.append(sight)\n for id in machine_idlist:\n orientation = self.k.vehicle.get_orientation(id)\n sight = self.renderer.get_sight(\n orientation, id)\n self.sights.append(sight)\n" ]
[ [ "numpy.asarray", "numpy.copy", "numpy.clip" ] ]
AlvinWen428/keyframe-focused-imitation-learning
[ "0aa9abb663b5351ec7dd52df87313e53e6a0d2f4" ]
[ "carla08/driving_benchmark/driving_benchmark.py" ]
[ "# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\nimport os\nimport abc\nimport logging\nimport math\nimport time\nimport numpy as np\nfrom queue import Queue\n\nfrom ..client import VehicleControl\nfrom ..client import make_carla_client\nfrom ..driving_benchmark.metrics import Metrics\nfrom ..planner.planner import Planner\nfrom ..settings import CarlaSettings\nfrom ..tcp import TCPConnectionError\nfrom configs import g_conf\n\nfrom . import results_printer\nfrom .recording import Recording\n\n\ndef get_vec_dist(x_dst, y_dst, x_src, y_src):\n vec = np.array([x_dst, y_dst] - np.array([x_src, y_src]))\n dist = math.sqrt(vec[0] ** 2 + vec[1] ** 2)\n return vec / dist, dist\n\n\ndef sldist(c1, c2):\n return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)\n\n\nclass DrivingBenchmark(object):\n \"\"\"\n The Benchmark class, controls the execution of the benchmark interfacing\n an Agent class with a set Suite.\n\n\n The benchmark class must be inherited with a class that defines the\n all the experiments to be run by the agent\n \"\"\"\n\n def __init__(\n self,\n city_name='Town01',\n name_to_save='Test',\n continue_experiment=False,\n save_images=False,\n save_videos=False,\n save_processed_videos=False,\n policy_roll_out=False,\n distance_for_success=2.0\n ):\n \"\"\"\n Args\n city_name:\n name_to_save:\n continue_experiment:\n save_images:\n distance_for_success:\n collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.\n \"\"\"\n\n self.__metaclass__ = abc.ABCMeta\n\n self._city_name = city_name\n self._base_name = name_to_save\n # The minimum distance for arriving into the goal point in\n # order to consider ir a success\n self._distance_for_success = distance_for_success\n # The object used to record the benchmark and to able to continue after\n self._recording = Recording(name_to_save=name_to_save,\n continue_experiment=continue_experiment,\n save_images=save_images,\n save_videos=save_videos,\n save_processed_videos=save_processed_videos,\n policy_roll_out=policy_roll_out\n )\n\n # We have a default planner instantiated that produces high level commands\n self._planner = Planner(city_name)\n self._map = self._planner._city_track.get_map()\n\n # TO keep track of the previous collisions\n self._previous_pedestrian_collision = 0\n self._previous_vehicle_collision = 0\n self._previous_other_collision = 0\n\n def benchmark_agent(self, experiment_suite, agent, client):\n \"\"\"\n Function to benchmark the agent.\n It first check the log file for this benchmark.\n if it exist it continues from the experiment where it stopped.\n\n\n Args:\n experiment_suite\n agent: an agent object with the run step class implemented.\n client:\n\n\n Return:\n A dictionary with all the metrics computed from the\n agent running the set of experiments.\n \"\"\"\n\n # Instantiate a metric object that will be used to compute the metrics for\n # the benchmark afterwards.\n metrics_object = Metrics(experiment_suite.metrics_parameters,\n experiment_suite.dynamic_tasks)\n\n # Function return the current pose and task for this benchmark.\n start_pose, start_experiment, start_rep = self._recording.get_pose_experiment_rep(\n experiment_suite.get_number_of_poses_task(), experiment_suite.get_number_of_reps_poses())\n\n print(start_pose, start_experiment, start_rep)\n 
logging.info('START')\n\n for experiment in experiment_suite.get_experiments()[int(start_experiment):]:\n print(experiment)\n positions = client.load_settings(\n experiment.conditions).player_start_spots\n\n self._recording.log_start(experiment.task)\n\n for pose in experiment.poses[start_pose:]:\n for rep in range(start_rep, experiment.repetitions):\n\n start_index = pose[0]\n end_index = pose[1]\n print(\"start index \", start_index, \"end index\", end_index)\n client.start_episode(start_index)\n # Print information on\n logging.info('======== !!!! ==========')\n logging.info(' Start Position %d End Position %d ',\n start_index, end_index)\n\n self._recording.log_poses(start_index, end_index,\n experiment.Conditions.WeatherId,\n str(experiment.Conditions.WeatherId) + '_' + str(experiment.task) + '_'\n + str(start_index) + '.' + str(end_index))\n\n # Calculate the initial distance for this episode\n initial_distance = \\\n sldist(\n [positions[start_index].location.x, positions[start_index].location.y],\n [positions[end_index].location.x, positions[end_index].location.y])\n\n # Different from initial_distance (which is the L2 distance between start point and end point),\n # initial_path_distance is the length of the path\n initial_path_distance = self._get_shortest_path(positions[start_index], positions[end_index])\n\n time_out = experiment_suite.calculate_time_out(initial_path_distance)\n\n logging.info('Timeout for Episode: %f', time_out)\n # running the agent\n (result, reward_vec, control_vec, final_time, remaining_distance, col_ped,\n col_veh, col_oth, number_of_red_lights, number_of_green_lights) = \\\n self._run_navigation_episode(\n agent, client, time_out, positions[end_index],\n str(experiment.Conditions.WeatherId) + '_'\n + str(experiment.task) + '_' + str(start_index)\n + '.' + str(end_index), experiment_suite.metrics_parameters,\n experiment_suite.collision_as_failure,\n experiment_suite.traffic_light_as_failure,\n avoid_stop=experiment_suite.avoid_stop)\n\n self._recording.log_poses_finish()\n\n # Write the general status of the just ran episode\n self._recording.write_summary_results(\n experiment, pose, rep, initial_distance,\n remaining_distance, final_time, time_out, result, col_ped, col_veh, col_oth,\n number_of_red_lights, number_of_green_lights, initial_path_distance)\n\n # Write the details of this episode.\n self._recording.write_measurements_results(experiment, rep, pose, reward_vec,\n control_vec)\n if result > 0:\n logging.info('+++++ Target achieved in %f seconds! +++++',\n final_time)\n else:\n logging.info('----- Timeout! 
-----')\n\n start_rep = 0\n start_pose = 0\n\n self._recording.log_end()\n\n return metrics_object.compute(self._recording.path)\n\n def get_planned_path_distance(self, start_point, end_point):\n return self._get_shortest_path(start_point, end_point)\n\n def get_path(self):\n \"\"\"\n Returns the path were the log was saved.\n \"\"\"\n return self._recording.path\n\n def _get_directions(self, current_point, end_point):\n \"\"\"\n Class that should return the directions to reach a certain goal\n \"\"\"\n\n directions = self._planner.get_next_command(\n (current_point.location.x,\n current_point.location.y, 0.22),\n (current_point.orientation.x,\n current_point.orientation.y,\n current_point.orientation.z),\n (end_point.location.x, end_point.location.y, 0.22),\n (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))\n return directions\n\n def _get_shortest_path(self, start_point, end_point):\n \"\"\"\n Calculates the shortest path between two points considering the road netowrk\n \"\"\"\n\n return self._planner.get_shortest_path_distance(\n [\n start_point.location.x, start_point.location.y, 0.22], [\n start_point.orientation.x, start_point.orientation.y, 0.22], [\n end_point.location.x, end_point.location.y, end_point.location.z], [\n end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])\n\n def _has_agent_collided(self, measurement, metrics_parameters):\n\n \"\"\"\n This function must have a certain state and only look to one measurement.\n \"\"\"\n collided_veh = 0\n collided_ped = 0\n collided_oth = 0\n\n if (measurement.collision_vehicles - self._previous_vehicle_collision) \\\n > metrics_parameters['collision_vehicles']['threshold'] / 2.0:\n collided_veh = 1\n if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \\\n > metrics_parameters['collision_pedestrians']['threshold'] / 2.0:\n collided_ped = 1\n if (measurement.collision_other - self._previous_other_collision) \\\n > metrics_parameters['collision_other']['threshold'] / 2.0:\n collided_oth = 1\n\n self._previous_pedestrian_collision = measurement.collision_pedestrians\n self._previous_vehicle_collision = measurement.collision_vehicles\n self._previous_other_collision = measurement.collision_other\n\n return collided_ped, collided_veh, collided_oth\n\n def _is_traffic_light_active(self, agent, orientation):\n\n x_agent = agent.traffic_light.transform.location.x\n y_agent = agent.traffic_light.transform.location.y\n\n def search_closest_lane_point(x_agent, y_agent, depth):\n step_size = 4\n if depth > 1:\n return None\n try:\n degrees = self._map.get_lane_orientation_degrees([x_agent, y_agent, 38])\n # print (degrees)\n except:\n return None\n\n if not self._map.is_point_on_lane([x_agent, y_agent, 38]):\n # print (\" Not on lane \")\n result = search_closest_lane_point(x_agent + step_size, y_agent, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent, y_agent + step_size, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent + step_size, y_agent + step_size, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent + step_size, y_agent - step_size, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent - step_size, y_agent + step_size, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent - step_size, y_agent, depth + 1)\n if result is not None:\n return 
result\n result = search_closest_lane_point(x_agent, y_agent - step_size, depth + 1)\n if result is not None:\n return result\n result = search_closest_lane_point(x_agent - step_size, y_agent - step_size, depth + 1)\n if result is not None:\n return result\n else:\n # print(\" ON Lane \")\n if degrees < 6:\n return [x_agent, y_agent]\n else:\n return None\n\n closest_lane_point = search_closest_lane_point(x_agent, y_agent, 0)\n car_direction = math.atan2(orientation.y, orientation.x) + 3.1415\n if car_direction > 6.0:\n car_direction -= 6.0\n\n return math.fabs(car_direction -\n self._map.get_lane_orientation_degrees([closest_lane_point[0], closest_lane_point[1], 38])\n ) < 1\n\n def _test_for_traffic_lights(self, measurement):\n \"\"\"\n\n This function tests if the car passed into a traffic light, returning 'red'\n if it crossed a red light , 'green' if it crossed a green light or none otherwise\n\n Args:\n measurement: all the measurements collected by carla 0.8.4\n\n Returns:\n\n \"\"\"\n\n def is_on_burning_point(_map, location):\n\n # We get the current lane orientation\n ori_x, ori_y = _map.get_lane_orientation([location.x, location.y, 38])\n\n # We test to walk in direction of the lane\n future_location_x = location.x\n future_location_y = location.y\n\n for i in range(3):\n future_location_x += ori_x\n future_location_y += ori_y\n # Take a point on a intersection in the future\n location_on_intersection_x = future_location_x + 2 * ori_x\n location_on_intersection_y = future_location_y + 2 * ori_y\n\n if not _map.is_point_on_intersection([future_location_x,\n future_location_y,\n 38]) and \\\n _map.is_point_on_intersection([location_on_intersection_x,\n location_on_intersection_y,\n 38]):\n return True\n\n return False\n\n # Check nearest traffic light with the correct orientation state.\n\n player_x = measurement.player_measurements.transform.location.x\n player_y = measurement.player_measurements.transform.location.y\n\n # The vehicle is on an intersection\n # THIS IS THE PLACE TO VERIFY FOR A TL BURN\n\n for agent in measurement.non_player_agents:\n if agent.HasField('traffic_light'):\n if not self._map.is_point_on_intersection([player_x, player_y, 38]):\n x_agent = agent.traffic_light.transform.location.x\n y_agent = agent.traffic_light.transform.location.y\n tl_vector, tl_dist = get_vec_dist(x_agent, y_agent, player_x, player_y)\n if self._is_traffic_light_active(agent,\n measurement.player_measurements.\n transform.orientation):\n if is_on_burning_point(self._map,\n measurement.player_measurements.transform.location) \\\n and tl_dist < 6.0:\n if agent.traffic_light.state != 0: # Not green\n return 'red'\n else:\n return 'green'\n\n return None\n\n def _run_navigation_episode(\n self,\n agent,\n client,\n time_out,\n target,\n episode_name,\n metrics_parameters,\n collision_as_failure,\n traffic_light_as_failure,\n avoid_stop=True):\n \"\"\"\n Run one episode of the benchmark (Pose) for a certain agent.\n\n\n Args:\n agent: the agent object\n client: an object of the carla client to communicate\n with the CARLA simulator\n time_out: the time limit to complete this episode\n target: the target to reach\n episode_name: The name for saving images of this episode\n metrics_object: The metrics object to check for collisions\n\n \"\"\"\n\n # Send an initial command.\n measurements, sensor_data = client.read_data()\n client.send_control(VehicleControl())\n\n initial_timestamp = measurements.game_timestamp\n current_timestamp = initial_timestamp\n\n # The vector containing all 
measurements produced on this episode\n measurement_vec = []\n # The vector containing all controls produced on this episode\n control_vec = []\n # support testing when input is a stack of frames\n # Here I use a queue to save the frame sequence\n original_image_queue = Queue(maxsize=g_conf.NUMBER_FRAMES_FUSION)\n if g_conf.NUMBER_PREVIOUS_ACTIONS > 0:\n previous_actions_queue = Queue(maxsize=g_conf.NUMBER_PREVIOUS_ACTIONS * 3)\n frame = 0\n distance = 10000\n stuck_counter = 0\n pre_x = 0.0\n pre_y = 0.0\n col_ped, col_veh, col_oth = 0, 0, 0\n traffic_light_state, number_red_lights, number_green_lights = None, 0, 0\n fail = False\n success = False\n is_time_out = False\n not_count = 0\n\n while not fail and not success:\n # Read data from server with the client\n measurements, sensor_data = client.read_data()\n # The directions to reach the goal are calculated.\n directions = self._get_directions(measurements.player_measurements.transform, target)\n\n if not original_image_queue.empty():\n original_image_queue.get()\n\n # support testing when input previous actions\n if g_conf.NUMBER_PREVIOUS_ACTIONS > 0:\n if frame == 0:\n while not previous_actions_queue.full():\n previous_actions_queue.put(0.0)\n previous_actions_queue.put(0.0)\n previous_actions_queue.put(0.0)\n # Agent process the data.\n if g_conf.NUMBER_PREVIOUS_ACTIONS > 0:\n control, original_image = \\\n agent.run_step(measurements, sensor_data, list(original_image_queue.queue),\n directions, target, previous_actions_list=list(previous_actions_queue.queue),\n avoid_stop=avoid_stop)\n else:\n control, original_image = \\\n agent.run_step(measurements, sensor_data, list(original_image_queue.queue),\n directions, target, avoid_stop=avoid_stop)\n # Send the control commands to the vehicle\n client.send_control(control)\n\n # Put the original and processed images into the Queue\n while not original_image_queue.full():\n original_image_queue.put(original_image)\n\n # save images if the flag is activated\n self._recording.save_images(sensor_data, episode_name, frame)\n\n # save videos if the flag is activated\n self._recording.write_video(sensor_data)\n\n # save measurements and controls if the flag 'policy_roll_out' is activated\n self._recording.policy_roll_out(measurements, control, episode_name, frame, directions, target)\n\n current_x = measurements.player_measurements.transform.location.x\n current_y = measurements.player_measurements.transform.location.y\n\n logging.info(\"Controller is Inputting:\")\n logging.info('Steer = %f Throttle = %f Brake = %f ',\n control.steer, control.throttle, control.brake)\n\n current_timestamp = measurements.game_timestamp\n logging.info('Timestamp %f', current_timestamp)\n # Get the distance travelled until now\n\n distance = sldist([current_x, current_y],\n [target.location.x, target.location.y])\n # Write status of the run on verbose mode\n logging.info('Status:')\n logging.info(\n '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',\n float(distance), current_x, current_y, target.location.x,\n target.location.y)\n # Check if reach the target\n col_ped, col_veh, col_oth = self._has_agent_collided(measurements.player_measurements,\n metrics_parameters)\n # test if car crossed the traffic light\n traffic_light_state = self._test_for_traffic_lights(measurements)\n\n if traffic_light_state == 'red' and not_count == 0:\n number_red_lights += 1\n not_count = 20\n\n elif traffic_light_state == 'green' and not_count == 0:\n number_green_lights += 1\n not_count = 20\n\n else:\n not_count -= 1\n 
not_count = max(0, not_count)\n\n if sldist([current_x, current_y], [pre_x, pre_y]) < 0.1:\n stuck_counter += 1\n else:\n stuck_counter = 0\n pre_x = current_x\n pre_y = current_y\n\n if distance < self._distance_for_success:\n success = True\n elif (current_timestamp - initial_timestamp) > (time_out * 1000):\n is_time_out = True\n fail = True\n elif collision_as_failure and (col_ped or col_veh or col_oth):\n fail = True\n elif traffic_light_as_failure and traffic_light_state == 'red':\n fail = True\n logging.info('Traffic Lights:')\n logging.info(\n 'red %f green %f, total %f',\n number_red_lights, number_green_lights, number_red_lights + number_green_lights)\n # Increment the vectors, pop the sensor data queue and append the measurements and controls.\n frame += 1\n\n if g_conf.NUMBER_PREVIOUS_ACTIONS > 0:\n [previous_actions_queue.get() for i in range(3)]\n previous_actions_queue.put(control.steer)\n previous_actions_queue.put(control.throttle)\n previous_actions_queue.put(control.brake)\n measurement_vec.append(measurements.player_measurements)\n control_vec.append(control)\n\n if is_time_out:\n final_time = time_out\n else:\n final_time = float(current_timestamp - initial_timestamp) / 1000.0\n\n if success:\n return 1, measurement_vec, control_vec, final_time, distance, col_ped, col_veh, col_oth, \\\n number_red_lights, number_green_lights\n return 0, measurement_vec, control_vec, final_time, distance, col_ped, col_veh, col_oth, \\\n number_red_lights, number_green_lights\n\n\ndef run_driving_benchmark(agent,\n experiment_suite,\n city_name='Town01',\n log_name='Test',\n continue_experiment=False,\n save_images=False,\n save_videos=True,\n save_processed_videos=False,\n policy_roll_out=False,\n host='127.0.0.1',\n port=2000\n ):\n while True:\n try:\n\n with make_carla_client(host, port, timeout=50) as client:\n # Hack to fix for the issue 310, we force a reset, so it does not get\n # the positions on first server reset.\n client.load_settings(CarlaSettings())\n client.start_episode(0)\n\n # We instantiate the driving benchmark, that is the engine used to\n # benchmark an agent. The instantiation starts the log process, sets\n\n benchmark = DrivingBenchmark(city_name=city_name,\n name_to_save=log_name + '_'\n + type(experiment_suite).__name__\n + '_' + city_name,\n save_images=save_images,\n save_videos=save_videos,\n save_processed_videos=save_processed_videos,\n policy_roll_out=policy_roll_out,\n continue_experiment=continue_experiment)\n # This function performs the benchmark. It returns a dictionary summarizing\n # the entire execution.\n benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client)\n\n print(\"\")\n print(\"\")\n print(\"----- Printing results for training weathers (Seen in Training) -----\")\n print(\"\")\n print(\"\")\n results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers,\n benchmark.get_path())\n\n print(\"\")\n print(\"\")\n print(\"----- Printing results for test weathers (Unseen in Training) -----\")\n print(\"\")\n print(\"\")\n\n results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers,\n benchmark.get_path())\n\n break\n\n except TCPConnectionError as error:\n logging.error(error)\n time.sleep(1)\n" ]
[ [ "numpy.array" ] ]
psindhuja98/analyzing-weather-dataset
[ "f9d5fd6568669c96cf6fc5fdd865af9495ce484b" ]
[ "code.py" ]
[ "# --------------\n#Importing the modules\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import mode \r\n\r\n\r\n#Code for categorical variable\r\ndef categorical(df):\r\n \"\"\" Extract names of categorical column\r\n \r\n This function accepts a dataframe and returns categorical list,\r\n containing the names of categorical columns(categorical_var).\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n \r\n Returns:\r\n categorical_var - List of categorical features\r\n \"\"\"\r\n categorical_var = df.select_dtypes(include = 'object')\r\n return categorical_var.columns\r\n \r\n\r\n\r\n#Code for numerical variable\r\ndef numerical(df):\r\n \"\"\" Extract names of numerical column\r\n \r\n This function accepts a dataframe and returns numerical list,\r\n containing the names of numerical columns(numerical_var).\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n \r\n Returns:\r\n numerical_var - List of numerical features\r\n \"\"\"\r\n numerical_var = df.select_dtypes(include = 'number')\r\n return numerical_var.columns\r\n\r\n\r\n\r\n#code to check distribution of variable\r\ndef clear(df,col,val):\r\n \"\"\" Check distribution of variable\r\n \r\n This function accepts a dataframe,column(feature) and value which returns count of the value,\r\n containing the value counts of a variable(value_counts)\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe\r\n col - Feature of the datagrame\r\n val - value of the feature\r\n \r\n Returns:\r\n value_counts - Value count of the feature \r\n \"\"\"\r\n return df[col].value_counts()[val]\r\n \r\n\r\n\r\n\r\n#Code to check instances based on the condition\r\ndef instances_based_condition(df,col1,val1,col2,val2):\r\n \"\"\" Instances based on the condition\r\n \r\n This function accepts a dataframe, 2 columns(feature) and 2 values which returns the dataframe\r\n based on the condition.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - First feature of the dataframe on which you want to apply the filter\r\n val1 - Value to be filtered on the first feature\r\n col2 - Second feature of the dataframe on which you want to apply the filter\r\n val2 - Value to be filtered on second feature\r\n \r\n Returns:\r\n instance - Generated dataframe\r\n \"\"\"\r\n instance = df[(df[col1] > val1) & (df[col2] == val2)]\r\n return instance\r\n \r\n\r\n\r\n\r\n# Code to calculate different aggreagted values according to month\r\n\r\ndef agg_values_ina_month(df,date_col,agg_col, agg):\r\n \"\"\" Aggregate values according to month\r\n \r\n This function accepts a dataframe, 2 columns(feature) and aggregated funcion(agg) which returns the Pivot \r\n table with different aggregated value of the feature with an index of the month.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n date_col - Date feature of the dataframe on which you want to apply to_datetime conversion\r\n agg_col - Feature of the dataframe on which values will be aggregated.\r\n agg - Dictionary of aggregate functions with feature as the key and func as the value\r\n \r\n Returns:\r\n aggregated_value - Generated pivot table\r\n \"\"\"\r\n df[date_col] = pd.to_datetime(df[date_col],format = \"%Y-%m-%d %H:%M:%S\")\r\n aggregated_value = pd.pivot_table(df, index = df[date_col].dt.month, values = agg_col, aggfunc = agg)\r\n return aggregated_value\r\n\r\n\r\n\r\n# Code to group values based on the feature\r\ndef 
group_values(df,col1,agg1):\r\n \"\"\" Agrregate values by grouping\r\n \r\n This function accepts a dataframe, 1 column(feature) and aggregated function(agg1) which groupby the \r\n datframe based on the column.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - Feature of the dataframe on which values will be aggregated.\r\n agg1 - Dictionary of aggregate functions with feature as the key and func as the value\r\n \r\n Returns:\r\n grouping - Dataframe with all columns on which it is grouped on.\r\n \"\"\"\r\n grouping = df.groupby(col1).agg(agg1)\r\n return grouping\r\n\r\n\r\n\r\n# function for conversion \r\ndef convert(df,celsius):\r\n \"\"\" Convert temperatures from celsius to fahrenhheit\r\n \r\n This function accepts a dataframe, 1 column(feature) which returns the dataframe with converted values from \r\n celsius to fahrenhheit.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n celsius - Temperature feature of the dataframe which you want to convert to fahrenhheit\r\n \r\n Returns:\r\n converted_temp - Generated dataframe with Fahrenhheit temp.\r\n \r\n \"\"\"\r\n converted_temp=(df[celsius]*9/5)+32\r\n \r\n return converted_temp\r\n\r\n# Load the weather_2012 data csv file and store it in weather variable. The path of the dataset has been stored in the variable `path` for you.\r\nweather = pd.read_csv(path)\r\nweather.head()\r\n\r\n\r\n# As you have now loaded the weather data you might want to check the categorical and numerical variables. You can check it by calling categorical and numerical function. \r\nprint(\"Categorical variables in our Weather Dataset are : \",categorical(weather))\r\nprint(\"Numerical variables in our Weather Dataset are : \",numerical(weather))\r\n\r\n\r\n\r\n#You might be interested in checking the distribution of a specific value like the number of times the weather was exactly Cloudy in the given column. Feel free to check on other values.\r\n#You can check it by calling the function clear with respective parameters.\r\n#By using index of the value or name of the value you can check the number of count\r\nprint(\"The number of times the weather was cloudy in 2012: \", clear(weather,'Weather','Cloudy'))\r\n\r\n\r\n\r\n\r\n\r\n# Now suppose you want to check some instances based on a specific condition like when the wind speed was above 35 and visibility was 25. You can dicretly check it by calling the function instances_based_condition with respective parameters.\r\nwind_speed_35_vis_25 = instances_based_condition(weather,'Wind Spd (km/h)', 35, 'Visibility (km)', 25)\r\nwind_speed_35_vis_25.head(5)\r\n\r\n\r\n\r\n#You have temperature data and want to calculate the mean temperature recorded by month.You can generate a pivot table which contains the aggregated values(like mean, max ,min, sum, len) recoreded by month. \r\n#You can call the function agg_values_ina_month with respective parameters. \r\nprint(\"Mean temperature recorded by month: \\n\", agg_values_ina_month(weather,'Date/Time','Temp (C)','mean'))\r\n\r\n\r\n# To groupby based on a column like you want to groupby on Weather column and then aggregate the mean values of each column for different types of weather using mean. 
You can call the function group_values.\r\n# Feel free to try different aggregate functions like max, min, sum or len\r\nmean_weather = group_values(weather,'Weather', ['mean'])\r\nmean_weather.head(5)\r\n\r\n# If you have temperature data in Celsius and want to convert it to Fahrenheit, you can call the function convert.\r\nprint(convert(weather, 'Temp (C)'))\r\n\r\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.pivot_table" ] ]
awilliamson1889/Simple-SPAM-classificator
[ "4094ea1daa18bac71894158e2105d0e8398c3bd9" ]
[ "src/email_classifier.py" ]
[ "\"\"\"Email classifier\"\"\"\nimport os.path\nimport sys\nfrom joblib import dump\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nsys.path.append('src')\nfrom reader import DSReader\n\ndataset_path = os.path.abspath(\"tests/datasets/emails.csv\")\n\ntry:\n    emails_data = DSReader(dataset_path)\nexcept FileNotFoundError:\n    sys.exit(\"Dataset not found.\")\n\nprint(\"Please wait, fitting the model.\")\n\nDSReader.dataset_cleaning(emails_data)\n\nX, y = emails_data.vectorize()\n\npipeline = Pipeline([('vect', CountVectorizer(tokenizer=DSReader.tokenize)),\n                     ('clf', MultinomialNB())])\n\npipeline.fit(X.ravel(), y)\n\nfile_name = os.path.abspath('models/MultinomialNB_finalized_model.sav')\ndump(pipeline, file_name)\n\nprint('Model fitting finished successfully!')\n" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "sklearn.naive_bayes.MultinomialNB" ] ]
brodderickrodriguez/strategy_learning_system
[ "62d91c5ba19d41ba6768210638d26f83e56166b9" ]
[ "examples/prey_pred_model.py" ]
[ "# Brodderick Rodriguez\n# Auburn University - CSSE\n# 27 Aug. 2019\n\nimport strategy_learning_system as sls\nimport numpy as np\nimport sys\n\n# for bcr \nif sys.platform == 'darwin':\n\tROOT = '/Users/bcr/Dropbox/Projects'\n\tNETLOGO_HOME = '/Applications/NetLogo-6.0.4/'\n\nelse:\n\tROOT = '/home/bcr/Dropbox/Projects'\n\tNETLOGO_HOME = '/home/bcr/apps/NetLogo 6.0.4/'\n\nMODEL_DIR = ROOT + '/CODE/NetLogo/prey_predator_nlogo'\nMODEL_FILE_NAME = 'preypred.nlogo'\nMEDIATOR_NAME = 'preypred'\nSAVE_LOC = ROOT + '/data/sls_data'\n\n\ndef define_feature_model():\n\tinitial_number_wolves = sls.IntegerParameter('initial-number-wolves', 1, 250)\n\tinitial_number_sheep = sls.IntegerParameter('initial-number-sheep', 1, 250)\n\tgrass_regrowth_time = sls.IntegerParameter('grass-regrowth-time', 1, 100)\n\tsheep_gain_food = sls.IntegerParameter('sheep-gain-from-food', 1, 50)\n\twolf_gain_food = sls.IntegerParameter('wolf-gain-from-food', 1, 100)\n\tsheep_reproduce = sls.IntegerParameter('sheep-reproduce', 1, 20)\n\twolf_reproduce = sls.IntegerParameter('wolf-reproduce', 1, 20)\n\n\tsheep_outcome = sls.TimeSeriesOutcome('sheep')\n\twolves_outcome = sls.TimeSeriesOutcome('wolves')\n\tgrass_outcome = sls.TimeSeriesOutcome('grass')\n\tticks_outcome = sls.TimeSeriesOutcome('ticks')\n\n\tenvironmental_uncertainties = [grass_regrowth_time, sheep_gain_food, wolf_gain_food, sheep_reproduce, wolf_reproduce]\n\tmodel_uncertainties = [initial_number_wolves, initial_number_sheep]\n\toutcomes = [sheep_outcome, wolves_outcome, grass_outcome, ticks_outcome]\n\n\tfeature_model = sls.FeatureModel()\n\tfeature_model.environmental_uncertainties = environmental_uncertainties\n\tfeature_model.model_uncertainties = model_uncertainties\n\tfeature_model.outcomes = outcomes\n\n\treturn feature_model\n\n\ndef create():\n\tmed = sls.ModelMediator(name=MEDIATOR_NAME)\n\tmed.model = (MODEL_DIR, MODEL_FILE_NAME)\n\tmed.netlogo = (NETLOGO_HOME, '6.0')\n\tmed.feature_model = define_feature_model()\n\tmed.save_location = SAVE_LOC\n\tmed.save()\n\treturn med \t\n\n\ndef reward_function_1(outcome_keys, outcomes):\n\tMAX_TICK_ALLOWED = 50\n\trewards = np.zeros((outcomes.shape[0]))\n\n\tfor i, experiment_outcomes in enumerate(outcomes):\n\t\td = {key: exp_out for key, exp_out in zip(outcome_keys, experiment_outcomes)}\n\n\t\tmax_tick = np.max(d['ticks'])\n\t\twolves_pop_std = np.std(d['wolves'])\n\t\tsheep_pop_std = np.std(d['sheep'])\n\t\tgrass_pop_std = np.std(d['grass'])\n\n\t\trho = wolves_pop_std + sheep_pop_std + grass_pop_std + (MAX_TICK_ALLOWED - max_tick)\n\t\trewards[i] = (1.0 / rho) * 1000\n\n\treturn rewards\n\n\ndef create_context1(mediator):\n\tcxt1_resolution = mediator.feature_model.outcomes\n\tcxt1_resolution.append(mediator.feature_model['sheep-gain-from-food'])\n\tcxt1_resolution.append(mediator.feature_model['wolf-gain-from-food'])\n\tcxt1_resolution.append(mediator.feature_model['initial-number-sheep'])\n\tcxt1_resolution.append(mediator.feature_model['initial-number-wolves'])\n\n\tcxt = sls.Context(name='context1')\n\tcxt.reward_function = reward_function_1\n\tcxt.resolution_model = cxt1_resolution\n\tcxt.bins = np.linspace(0.0, 1.0, 3)\n\n\tcxt.num_experiments = 5\n\tcxt.num_replications = 10\n\tcxt.max_run_length = 50\n\tcxt.num_processes = 3\n\n\treturn cxt\n\n\n\n\n\n# mediator = create()\nmediator = sls.ModelMediator.load(root_dir_path=(SAVE_LOC + '/' + MEDIATOR_NAME))\n# mediator.save()\n\n# cxt1 = create_context1(mediator)\ncxt1 = mediator['context1']\n# print(cxt1)\n\n# mediator.evaluate_context(cxt1)\n# 
print(cxt1.processed_exploratory_results)\n\nmediator.learn(cxt1)\nprint(cxt1.processed_learned_data)\n\n\n\n\n\n\n\n" ]
[ [ "numpy.max", "numpy.std", "numpy.zeros", "numpy.linspace" ] ]
miyamotost/hand_object_detector
[ "34c8c6ad53306d4a12c12857e71bcd73bd6a68bf" ]
[ "lib/datasets/voc_eval.py" ]
[ "# --------------------------------------------------------\n# Fast/er R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Bharath Hariharan\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport xml.etree.ElementTree as ET\nimport os, sys, pdb, math\nimport pickle\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\n\n#sys.path.append('/y/dandans/Hand_Object_Detection/faster-rcnn.pytorch/lib/model/utils')\n# from lib.datasets.viz_hand_obj_debug import *\n\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n\n obj_struct['handstate'] = 0 if obj.find('contactstate').text is None else int(obj.find('contactstate').text)\n obj_struct['leftright'] = 0 if obj.find('handside').text is None else int(obj.find('handside').text)\n\n\n obj_struct['objxmin'] = None if obj.find('objxmin').text in [ None, 'None'] else float(obj.find('objxmin').text)\n obj_struct['objymin'] = None if obj.find('objymin').text in [ None, 'None'] else float(obj.find('objymin').text)\n obj_struct['objxmax'] = None if obj.find('objxmax').text in [ None, 'None'] else float(obj.find('objxmax').text)\n obj_struct['objymax'] = None if obj.find('objymax').text in [ None, 'None'] else float(obj.find('objymax').text)\n\n if obj_struct['objxmin'] is not None and obj_struct['objymin'] is not None and obj_struct['objxmax'] is not None and obj_struct['objymax'] is not None:\n obj_struct['objectbbox'] = [obj_struct['objxmin'], obj_struct['objymin'], obj_struct['objxmax'], obj_struct['objymax']]\n else:\n obj_struct['objectbbox'] = None\n\n\n\n objects.append(obj_struct) \n\n return objects\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\n\n\n'''\n@description: raw evaluation for fasterrcnn\n'''\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n Top level function that does the PASCAL VOC evaluation.\n detpath: Path to 
detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n print(f'\\n\\n thd = {ovthresh}\\n\\n')\n\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n if not os.path.isfile(cachefile):\n # load annotations\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # save\n print('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'wb') as f:\n pickle.dump(recs, f)\n else:\n # load\n with open(cachefile, 'rb') as f:\n try:\n recs = pickle.load(f)\n except:\n recs = pickle.load(f, encoding='bytes')\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'].lower() == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:2+4]] for x in splitlines])\n\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n\n if BB.shape[0] > 0:\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) 
- inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n\n return rec, prec, ap\n\n\n\n\n'''\n@description: eval hands\n@compare: hand_bbox, object_bbox, state, side\nTODO:\n(1) prepare gt and det of hand --> (image_path, score, handbbox, state, side, objectbbox)\n'''\ndef voc_eval_hand(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False,\n constraint=''\n ):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n Top level function that does the PASCAL VOC evaluation.\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n [constraint]:[handstate, handside, objectbbox]\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # ------------------------------------------\n # cachefile = test.txt_annots.pkl\n # imagesetfile = test.txt\n # annopath.format(imagename): filename in Annotations, eg. xxxx.xml\n # detpath = comp4_det_test_{classname}.txt: path, score, bbox, state. 
vector, side, xxx\n\n\n print(f'\\n\\n*** current overlap thd = {ovthresh}')\n print(f'*** current constraint = {constraint}')\n assert constraint in ['', 'handstate', 'handside', 'objectbbox', 'all']\n\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile) # cachefile = test.txt_annots.pkl\n # read list of images\n with open(imagesetfile, 'r') as f: \n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n if not os.path.isfile(cachefile):\n # load annotations\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # save\n print('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'wb') as f:\n pickle.dump(recs, f)\n else:\n # load\n with open(cachefile, 'rb') as f:\n try:\n recs = pickle.load(f)\n except:\n recs = pickle.load(f, encoding='bytes')\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'].lower() == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n handstate = np.array([x['handstate'] for x in R]).astype(np.int)\n leftright = np.array([x['leftright'] for x in R]).astype(np.int)\n objectbbox = np.array([x['objectbbox'] for x in R])\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'handstate': handstate,\n 'leftright':leftright,\n 'objectbbox':objectbbox,\n 'det': det}\n\n\n # ======== hand all det ======== #\n BB_det_object, image_ids_object, detfile_object = extract_BB(detpath, extract_class='targetobject')\n BB_det_hand, image_ids_hand, detfile_hand = extract_BB(detpath, extract_class='hand')\n \n ho_dict = make_hand_object_dict(BB_det_object, BB_det_hand, image_ids_object, image_ids_hand)\n hand_det_res = gen_det_result(ho_dict) # [image_path, score, handbbox, state, vector, side, objectbbox, objectbbox_score]\n\n # print(f'det len: obj-bbox={len(BB_det_object)}, obj_image={len(image_ids_object)}, {detfile_object}')\n # print(f'det len: hand-bbox={len(BB_det_hand)}, hand_image={len(image_ids_hand)}, {detfile_hand}')\n # print('\\n\\n\\n\\n')\n # pdb.set_trace() \n # for key, val in ho_dict.items():\n # print(key, val, '\\n\\n\\n')\n # ============================= #\n\n image_ids = [x[0] for x in hand_det_res]\n confidence = np.array([x[1] for x in hand_det_res])\n BB_det = np.array([[float(z) for z in x[2]] for x in hand_det_res])\n handstate_det = np.array([int(x[3]) for x in hand_det_res]) # get handstate\n leftright_det = np.array([int(x[5]) for x in hand_det_res]) # get leftright\n objectbbox_det = [ x[6] for x in hand_det_res]\n objectbbox_score_det = [ x[7] for x in hand_det_res]\n \n\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n\n if BB_det.shape[0] > 0:\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n\n # ======== det ======== #\n image_ids = [image_ids[x] for x in sorted_ind]\n confidence_det = [confidence[x] for x in sorted_ind]\n BB_det = BB_det[sorted_ind, :]\n handstate_det = handstate_det[sorted_ind]\n leftright_det = leftright_det[sorted_ind]\n objectbbox_det = [objectbbox_det[x] for x in sorted_ind] #objectbbox_det[sorted_ind, :]\n objectbbox_score_det = 
[objectbbox_score_det[x] for x in sorted_ind] #objectbbox_det[sorted_ind, :]\n # ============================= #\n \n\n # go down dets and mark TPs and FPs\n for d in range(nd):\n\n # det\n image_id_det = image_ids[d]\n score_det = confidence_det[d]\n bb_det = BB_det[d, :].astype(float)\n hstate_det = handstate_det[d].astype(int)\n hside_det = leftright_det[d].astype(int)\n objbbox_det = objectbbox_det[d]#.astype(float)\n objbbox_score_det = objectbbox_score_det[d]\n #print(f'debug hand-obj: {bb_det} {objbbox_det}')\n\n # gt\n ovmax = -np.inf\n R = class_recs[image_ids[d]]\n BBGT = R['bbox'].astype(float)\n hstate_GT = R['handstate'].astype(int)\n hside_GT = R['leftright'].astype(int)\n objbbox_GT = R['objectbbox']#.astype(float)\n\n \n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb_det[0])\n iymin = np.maximum(BBGT[:, 1], bb_det[1])\n ixmax = np.minimum(BBGT[:, 2], bb_det[2])\n iymax = np.minimum(BBGT[:, 3], bb_det[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb_det[2] - bb_det[0] + 1.) * (bb_det[3] - bb_det[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n\n # plot\n if 0:\n det_info = [bb_det, hstate_det, hside_det, objbbox_det, score_det, objbbox_score_det]\n gt_info = [BBGT[jmax], hstate_GT[jmax], hside_GT[jmax], objbbox_GT[jmax]]\n debug_det_gt(image_ids[d], det_info, gt_info, d)\n\n\n if constraint == '':\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]: # add diff constraints here for diff eval\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n \n\n elif constraint == 'handstate':\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax] and hstate_GT[jmax] == hstate_det: # add diff constraints here for diff eval\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n\n \n elif constraint == 'handside':\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax] and hside_GT[jmax] == hside_det: # add diff constraints here for diff eval\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n \n\n elif constraint == 'objectbbox':\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax] and val_objectbbox(objbbox_GT[jmax], objbbox_det, image_ids[d]): # add diff constraints here for diff eval\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n \n \n elif constraint == 'all':\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax] and hstate_GT[jmax] == hstate_det and hside_GT[jmax] == hside_det and val_objectbbox(objbbox_GT[jmax], objbbox_det, image_ids[d]): # add diff constraints here for diff eval\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n\n return rec, prec, ap\n\n\n\n# ======== debug ======== #\ndef debug_det_gt(image_name, det_info, gt_info, d):\n \n os.makedirs('/y/dandans/Hand_Object_Detection/faster-rcnn.pytorch/images/debug', exist_ok=True)\n # det_info = [bb_det, hstate_det, hside_det, 
objbbox_det, score_det, objbbox_score_det]\n # gt_info = [BBGT[jmax], hstate_GT[jmax], hside_GT[jmax], objbbox_GT[jmax]]\n\n genre, vid_folder = image_name.split('_', 1)[0], image_name.split('_', 1)[1][:13]\n genre_name = f'{genre}_videos'\n image_path = os.path.join('/y/jiaqig/hand_cache', genre_name, vid_folder, image_name+'.jpg')\n image = Image.open(image_path).convert(\"RGBA\")\n\n\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype('/y/dandans/Hand_Object_Detection/faster-rcnn.pytorch/lib/model/utils/times_b.ttf', size=20)\n width, height = image.size \n\n # ======== plot det ======== #\n \n hand_bbox_det = list(det_info[0])\n hand_bbox_det = list(int(np.round(x)) for x in hand_bbox_det)\n image = draw_hand_mask(image, draw, 0, hand_bbox_det, det_info[4], det_info[2], det_info[1], width, height, font)\n\n if det_info[3] is not None:\n object_bbox_det = list(det_info[3])\n object_bbox_det = list(int(np.round(x)) for x in object_bbox_det)\n image = draw_obj_mask(image, draw, 0, object_bbox_det, det_info[5], width, height, font)\n\n if det_info[1] > 0 : # in contact hand\n\n obj_cc, hand_cc = calculate_center_PIL(hand_bbox_det), calculate_center_PIL(object_bbox_det)\n draw_line_point(draw, 0, (int(hand_cc[0]), int(hand_cc[1])), (int(obj_cc[0]), int(obj_cc[1])))\n\n\n # ======== plot gt ======== #\n\n hand_bbox_gt = list(gt_info[0])\n hand_bbox_gt = list(int(np.round(x)) for x in hand_bbox_gt)\n image = draw_hand_mask(image, draw, 1, hand_bbox_gt, 1.0, gt_info[2], gt_info[1], width, height, font)\n\n if gt_info[3] is not None:\n object_bbox_gt = list(gt_info[3])\n object_bbox_gt = list(int(np.round(x)) for x in object_bbox_gt)\n image = draw_obj_mask(image, draw, 1, object_bbox_gt, 1.0, width, height, font)\n \n if gt_info[1] > 0: # in contact hand\n\n obj_cc, hand_cc = calculate_center_PIL(hand_bbox_gt), calculate_center_PIL(object_bbox_gt)\n draw_line_point(draw, 1, (int(hand_cc[0]), int(hand_cc[1])), (int(obj_cc[0]), int(obj_cc[1])))\n\n \n # ======== save ======== #\n \n save_name = image_name + f'_draw_{d:04d}.png'\n image.save(os.path.join('/y/dandans/Hand_Object_Detection/faster-rcnn.pytorch/images/debug', save_name))\n\n\n\n\n\n\n# ======== auxiluary functions ======== #\ndef val_objectbbox(objbbox_GT, objbbox_det, imagepath, threshold=0.5):\n if objbbox_GT is None and objbbox_det is None:\n #print('None - None')\n return True\n elif objbbox_GT is not None and objbbox_det is not None:\n if get_iou(objbbox_GT, objbbox_det) > threshold:\n #print('Yes', get_iou(objbbox_GT, objbbox_det), objbbox_GT, objbbox_det, imagepath)\n return True\n #else:\n #print('No', get_iou(objbbox_GT, objbbox_det), objbbox_GT, objbbox_det, imagepath)\n \n else:\n #print(f'None - Float')\n False\n \n \n\ndef get_iou(bb1, bb2):\n\n\n assert(bb1[0] <= bb1[2] and bb1[1] <= bb1[3] and bb2[0] <= bb2[2] and bb2[1] <= bb2[3]), print(bb1, bb2)\n\n # determine the coordinates of the intersection rectangle\n x_left = max(bb1[0], bb2[0])\n y_top = max(bb1[1], bb2[1])\n x_right = min(bb1[2], bb2[2])\n y_bottom = min(bb1[3], bb2[3])\n\n if x_right < x_left or y_bottom < y_top:\n return 0.0\n \n intersection_area = (x_right - x_left) * (y_bottom - y_top)\n\n bb1_area = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])\n bb2_area = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])\n\n iou = intersection_area / float(bb1_area + bb2_area - intersection_area)\n assert iou >= 0.0\n assert iou <= 1.0\n return iou\n\n\ndef extract_BB(detpath, extract_class):\n '''\n @description\n ---> hand:\n image_ids item = image_path\n BB item =[score(0), 
bbox(1:1+4), state(5), vector(6:6+3), side(9)]\n --> object:\n image_ids item = image_path\n BB item = [score(0), bbox(1,1+4)]\n '''\n # read dets\n detfile = detpath.format(extract_class)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n BB = np.array([[float(z) for z in x[1:]] for x in splitlines])\n\n #print(f'in-function, det len: {extract_class}-bbox={len(BB)}, {extract_class}_image={len(image_ids)}, {detfile}')\n return BB, image_ids, detfile\n\ndef make_hand_object_dict(BB_o, BB_h, image_o, image_h):\n ho_dict = {}\n for bb_h, id_h in zip(BB_h, image_h):\n if id_h in ho_dict:\n ho_dict[id_h]['hands'].append(bb_h)\n else:\n ho_dict[id_h] = {'hands': [bb_h], 'objects': []}\n\n for bb_o, id_o in zip(BB_o, image_o):\n if id_o in ho_dict:\n ho_dict[id_o]['objects'].append(bb_o)\n else:\n ho_dict[id_o] = {'hands': [], 'objects': [bb_o]}\n return ho_dict\n\ndef calculate_center(bb):\n return [(bb[1] + bb[3])/2, (bb[2] + bb[4])/2]\n\n\n'''\n@description: \n[image_path, score, handbbox, state, vector, side, objectbbox]\n'''\ndef gen_det_result(ho_dict):\n\n # take all results\n hand_det_res = []\n\n for key, info in ho_dict.items():\n object_cc_list = []\n object_bb_list = []\n object_score_list = []\n\n for j, object_info in enumerate(info['objects']):\n object_bbox = [object_info[1], object_info[2], object_info[3], object_info[4]]\n object_cc_list.append(calculate_center(object_info)) # is it wrong???\n object_bb_list.append(object_bbox)\n object_score_list.append(float(object_info[0]))\n object_cc_list = np.array(object_cc_list)\n\n for i, hand_info in enumerate(info['hands']):\n hand_path = key\n hand_score = hand_info[0]\n hand_bbox = hand_info[1:5]\n hand_state = hand_info[5]\n hand_vector = hand_info[6:9]\n hand_side = hand_info[9] \n \n if hand_state <= 0 or len(object_cc_list) == 0 :\n to_add = [hand_path, hand_score, hand_bbox, hand_state, hand_vector, hand_side, None, None]\n hand_det_res.append(to_add)\n else:\n hand_cc = np.array(calculate_center(hand_info))\n point_cc = np.array([(hand_cc[0]+hand_info[6]*10000*hand_info[7]), (hand_cc[1]+hand_info[6]*10000*hand_info[8])])\n dist = np.sum( (object_cc_list - point_cc)**2 , axis=1)\n\n dist_min = np.argmin(dist)\n # get object bbox\n target_object_score = object_score_list[dist_min]\n #\n target_object_bbox = object_bb_list[dist_min]\n to_add = [hand_path, hand_score, hand_bbox, hand_state, hand_vector, hand_side, target_object_bbox, target_object_score]\n hand_det_res.append(to_add)\n \n \n return hand_det_res" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.arange", "numpy.cumsum", "numpy.sort", "numpy.finfo", "numpy.concatenate", "numpy.max", "numpy.round", "numpy.argmax", "numpy.where", "numpy.argmin", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
GwydionJon/Group_1_Developing_Scientific_Software
[ "ca15bbadd8b311f2ba50425bf9c13c501f5c9af4" ]
[ "src/module_draft/main.py" ]
[ "# Author: Tobias Kaczun, Leonie Kreis, Gwydion Daskalakis\n# Date: 19.03.21\n# Package: DSS Analysis Package\n\nimport sys\nimport numpy as np\nimport reader\nfrom user_input import user_input\nimport analysis\n\n\ndef main():\n \"\"\"Main function for commandline call\n \"\"\"\n # end user version for user_input\n args = user_input(sys.argv[1:])\n\n # add your own args = user_input() for testing and debugging so that you\n # don't have to call the script with full command line input\n\n # args = user_input(['Input/Task1/', '-o',\n # 'Output/Task2/'])\n\n # read files\n reader_obj = reader.FileReader(args.path)\n input_df = reader_obj.read()\n\n # perform statistical analysis\n stat_ana = analysis.Statistical_Analysis(args.output)\n stat_ana.correlation(input_df['npop.t'])\n stat_ana.eucl_distance(input_df['table.dat'])\n\n # perfomr numerical analysis\n num_ana = analysis.Numerical_Analysis(args.output)\n\n # return new df with the desired columns\n df_efield_relevant = num_ana.remove_low_variance(input_df['efield.t'])\n\n # fft with freq of the df\n df_efield_fft = num_ana.fft_with_freq_analysis(df_efield_relevant, \"y\")\n\n # disabled plot to not have it get on my nerves\n num_ana.plot_and_save(df_efield_fft,\n \"freq\",\n \"intensitys\",\n \"efield_fft_analysis\",\n xlabel=\"Freq\",\n show_graph=False)\n\n df_autocorr = num_ana.autocorrelation(input_df[\"nstate_i.t\"], \"time\")\n\n num_ana.plot_and_save(df_autocorr, \"time\", [\"autocorr_abs\",\n \"autocorr_real\",\n \"autocorr_imag\"],\n \"nstate_autocorr_analysis\", xlabel=\"time\",\n show_graph=False)\n\n df_autocorr_fft = num_ana.fft_with_freq_analysis(df_autocorr,\n \"autocorr\",\n type=\"complex\")\n\n # adding abs**2 to the dataframe\n df_autocorr_fft[\"intensitys_squared\"] = np.abs(\n df_autocorr_fft[\"intensitys\"].values)**2\n num_ana.plot_and_save(df_autocorr_fft, \"freq\", [\"intensitys\",\n \"intensitys_squared\"],\n \"nstate_autocorr_fft_analysis\", xlabel=\"Freq\",\n show_graph=True, crop_edge=3)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.abs" ] ]
homeostasie/petits-pedestres
[ "bf20d94a5f2b12d2bb860ebb06a6b18641271020" ]
[ "2014.d/3-differential-equations-in-action.d/1-houston-we-have-a-problem/pb1-houston-we-have-a-problem.py" ]
[ "\"\"\" \n\tDifferential Equations in Action\n\tLesson 1 - Houston We have a problem\n\"\"\"\t\n# Import\nimport math # import math.cos(), math.sin(), math.pi\nimport numpy # import distance\nimport matplotlib.pyplot as plt # import plot\n\n\"\"\" \n-----------------------------------------------------------------------\n\t- 1 - Exercices\n-----------------------------------------------------------------------\n\"\"\"\n\n# Exo 1 - Define x, sin(x) and cos(x)\ndef sin_cos(num_steps):\n\n x = numpy.zeros(num_steps)\n sin_x = numpy.zeros(num_steps)\n cos_x = numpy.zeros(num_steps)\n\n for i in range(num_steps):\n x[i] =2 * math.pi * i / (num_steps-1)\n sin_x[i] = math.cos(x[i])\n cos_x[i] = math.sin(x[i])\n \n return x, sin_x, cos_x\n\n# Exo 2 - Forward Euler method\ndef forward_euler(time_steps, num_steps, g):\n\n t = numpy.zeros(num_steps + 1)\n x = numpy.zeros(num_steps + 1)\n v = numpy.zeros(num_steps + 1)\n\n for step in range(num_steps):\n t[step + 1] = (step+1) * time_steps\n x[step + 1] = x[step] + time_steps*v[step]\n v[step + 1] = v[step] - time_steps*g\n \n return t, x, v\n\n# Exo 3 - Spaceship acceleration with Moon and Earth at a t-time\ndef acceleration(moon_position, spaceship_position):\n X_ES = - spaceship_position\n X_MS = moon_position - spaceship_position\n\n F_ES = ( gravitational_constant * earth_mass / ( numpy.linalg.norm(X_ES)**3) ) * X_ES\n F_MS = ( gravitational_constant * earth_mass / ( numpy.linalg.norm(X_MS)**3) ) * X_MS\n\n return F_ES + F_MS\n\n\"\"\" \n-----------------------------------------------------------------------\n\t- 2 - Problems\n-----------------------------------------------------------------------\n\"\"\"\n\n# PROBLEM 1\n# Modelise one revolution of the moon around the earth, assuming that \n# the orbit is circular.\ndef orbit(num_steps):\n x = numpy.zeros([num_steps + 1, 2])\n for i in range(num_steps + 1):\n x[i,0] = moon_distance * math.cos(2. * math.pi * i /num_steps)\n x[i,1] = moon_distance * math.sin(2. 
* math.pi * i /num_steps)\n\n return x\n\n# PROBLEM 2\n# Free fall at initial speed with initial angles.\ndef trajectory(time_steps, num_steps, g, initial_speed, inital_angles):\n acceleration = numpy.array([0., -g])\n x = numpy.zeros([num_steps + 1, 2]) # m\n v = numpy.zeros([num_steps + 1, 2]) # m / s\n\t# init position and speed\n x[0,:] = [0,0]\n v[0,:] = [initial_speed * math.cos(inital_angles), initial_speed * math.sin(inital_angles) ]\n for step in range(num_steps):\n\t\t# Forward Euler Method\n v[step + 1,:] = v[step] + time_steps*acceleration\n x[step + 1,:] = x[step] + time_steps*v[step]\n \n return x, v\n\n\n# PROBLEM 3\n# Spaceship Acceleration\ndef acceleration(spaceship_position):\n a = numpy.zeros(2) # m\n a[0] = - gravitational_constant * (earth_mass * spaceship_position[0]) / numpy.linalg.norm(spaceship_position)**3 \n a[1] = - gravitational_constant * (earth_mass * spaceship_position[1]) / numpy.linalg.norm(spaceship_position)**3 \n return a\n\n# Trajectory of a spacecraft with the given initial position and velocity.\ndef ship_trajectory(time_steps, num_steps, x_init, v_init):\n x = numpy.zeros([num_steps + 1, 2]) # m\n v = numpy.zeros([num_steps + 1, 2]) # m / s\n # init position and speed\n x[0, :] = x_init\n v[0, :] = v_init\n\n for step in range(num_steps):\n\t\t# Forward Euler Method\n v[step + 1,:] = v[step] + time_steps*acceleration(x[step,:]) \n x[step + 1,:] = x[step] + time_steps*v[step]\n \n return x, v\n\n\"\"\" \n-----------------------------------------------------------------------\n\t- 3 - Plot\n-----------------------------------------------------------------------\n\"\"\"\n\n# Exo 1 - plot sin(x) and cos(x)\ndef plot_sin_cos():\n plt.plot(x, sin_x)\n plt.plot(x, cos_x)\n plt.show()\n\n# Exo 2 - plot Forward Euler Method\ndef plot_euler():\n axes_height = plt.subplot(211)\n plt.plot(t, x)\n axes_velocity = plt.subplot(212)\n plt.plot(t, v)\n axes_height.set_ylabel('Height in m')\n axes_velocity.set_ylabel('Velocity in m/s')\n axes_velocity.set_xlabel('Time in s')\n plt.show() \n\n# Pb1 - plot moon orbit\ndef plot_orbit():\n plt.axis('equal')\n plt.plot(x[:, 0], x[:, 1])\n axes = plt.gca()\n axes.set_xlabel('Longitudinal position in m')\n axes.set_ylabel('Lateral position in m')\n plt.show()\n\n# Pb2 - plot Earth free fall\ndef plot_earth_free_fall(time_steps, num_steps, earth_gravitation, initial_speed, inital_angles):\n for angles in inital_angles:\n x,v = trajectory(time_steps, num_steps, earth_gravitation, initial_speed, angles)\n plt.plot(x[:, 0], x[:, 1])\n\n axes = plt.gca()\n axes.set_xlabel('x position in m')\n axes.set_ylabel('y position in m')\n plt.show()\n\n\n# Pb3 - plot spaceship orbit\ndef plot_ship_trajectory():\n plt.plot(x[:, 0], x[:, 1])\n plt.scatter(0, 0)\n plt.axis('equal')\n axes = plt.gca()\n axes.set_xlabel('Longitudinal position in m')\n axes.set_ylabel('Lateral position in m')\n plt.show()\n\n\"\"\" \n-----------------------------------------------------------------------\n\t- Main\n-----------------------------------------------------------------------\n\"\"\"\n\n# STEPS\nnum_steps = 50 # Max iteration number\ntime_steps = 0.1 # s\n\n# EARTH and MOON DATA\n# Mass\nearth_mass = 5.97e24 # kg\nmoon_mass = 7.35e22 # kg\n\n# Distance\nmoon_distance = 384e6 # m\n\n# Gravitation\ngravitational_constant = 6.67e-11 # N m2 / kg2\nearth_gravitation = 9.81 # m / s2\n\n\"\"\"\n-------------------- Exercices --------------------\n\"\"\"\n\n# Exo 1 - Cosinus and sinus\nx, sin_x, cos_x = sin_cos(num_steps)\n#plot_sin_cos() # uncomment for 
ploting\n\n# Exo 2 - Forward Euler Method\nt, x, v = forward_euler(time_steps, num_steps, earth_gravitation)\n#plot_euler() # uncomment for ploting\n\n\"\"\"\n-------------------- Problems --------------------\n\"\"\"\n\n# pb1 - Moon orbit\nx = orbit(num_steps)\n#plot_orbit() # uncomment for ploting\n\n# pb2 - Earth free fall\n# Initial value\ninitial_speed = 20. # m / s\ninitial_angles = math.pi /180 * numpy.linspace(20., 70., 6)\n#plot_earth_free_fall(time_steps, num_steps, earth_gravitation, initial_speed, initial_angles) # uncomment for ploting\n\n\n# pb3 - spaceship orbit\ntime_steps = 0.1 # s\nnum_steps = 130000\n\nx_init = [15e6, 1e6]\nv_init = [2e3, 4e3]\n\nx, v = ship_trajectory(time_steps, num_steps, x_init, v_init)\nplot_ship_trajectory() # uncomment for ploting\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.scatter", "numpy.linspace", "numpy.linalg.norm", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ] ]
carthurs/plotly.py
[ "23cbf9bef63ffaf898f89d7e0d6862ca25f5eae0" ]
[ "packages/python/plotly/plotly/tests/test_core/test_px/test_px_input.py" ]
[ "import plotly.express as px\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport plotly.graph_objects as go\nimport plotly\nfrom plotly.express._core import build_dataframe\nfrom pandas.util.testing import assert_frame_equal\n\nattrables = (\n [\"x\", \"y\", \"z\", \"a\", \"b\", \"c\", \"r\", \"theta\", \"size\", \"dimensions\"]\n + [\"custom_data\", \"hover_name\", \"hover_data\", \"text\"]\n + [\"error_x\", \"error_x_minus\"]\n + [\"error_y\", \"error_y_minus\", \"error_z\", \"error_z_minus\"]\n + [\"lat\", \"lon\", \"locations\", \"animation_group\"]\n)\narray_attrables = [\"dimensions\", \"custom_data\", \"hover_data\"]\ngroup_attrables = [\"animation_frame\", \"facet_row\", \"facet_col\", \"line_group\"]\n\nall_attrables = attrables + group_attrables + [\"color\"]\n\n\ndef test_numpy():\n fig = px.scatter(x=[1, 2, 3], y=[2, 3, 4], color=[1, 3, 9])\n assert np.all(fig.data[0].x == np.array([1, 2, 3]))\n assert np.all(fig.data[0].y == np.array([2, 3, 4]))\n assert np.all(fig.data[0].marker.color == np.array([1, 3, 9]))\n\n\ndef test_numpy_labels():\n fig = px.scatter(\n x=[1, 2, 3], y=[2, 3, 4], labels={\"x\": \"time\"}\n ) # other labels will be kw arguments\n assert fig.data[0][\"hovertemplate\"] == \"time=%{x}<br>y=%{y}\"\n\n\ndef test_with_index():\n tips = px.data.tips()\n fig = px.scatter(tips, x=tips.index, y=\"total_bill\")\n assert fig.data[0][\"hovertemplate\"] == \"index=%{x}<br>total_bill=%{y}\"\n fig = px.scatter(tips, x=tips.index, y=tips.total_bill)\n assert fig.data[0][\"hovertemplate\"] == \"index=%{x}<br>total_bill=%{y}\"\n fig = px.scatter(tips, x=tips.index, y=tips.total_bill, labels={\"index\": \"number\"})\n assert fig.data[0][\"hovertemplate\"] == \"number=%{x}<br>total_bill=%{y}\"\n # We do not allow \"x=index\"\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(tips, x=\"index\", y=\"total_bill\")\n assert \"To use the index, pass it in directly as `df.index`.\" in str(\n err_msg.value\n )\n tips = px.data.tips()\n tips.index.name = \"item\"\n fig = px.scatter(tips, x=tips.index, y=\"total_bill\")\n assert fig.data[0][\"hovertemplate\"] == \"item=%{x}<br>total_bill=%{y}\"\n\n\ndef test_pandas_series():\n tips = px.data.tips()\n before_tip = tips.total_bill - tips.tip\n fig = px.bar(tips, x=\"day\", y=before_tip)\n assert fig.data[0].hovertemplate == \"day=%{x}<br>y=%{y}\"\n fig = px.bar(tips, x=\"day\", y=before_tip, labels={\"y\": \"bill\"})\n assert fig.data[0].hovertemplate == \"day=%{x}<br>bill=%{y}\"\n\n\ndef test_several_dataframes():\n df = pd.DataFrame(dict(x=[0, 1], y=[1, 10], z=[0.1, 0.8]))\n df2 = pd.DataFrame(dict(time=[23, 26], money=[100, 200]))\n fig = px.scatter(df, x=\"z\", y=df2.money, size=\"x\")\n assert fig.data[0].hovertemplate == \"z=%{x}<br>y=%{y}<br>x=%{marker.size}\"\n fig = px.scatter(df2, x=df.z, y=df2.money, size=df.z)\n assert fig.data[0].hovertemplate == \"x=%{x}<br>money=%{y}<br>size=%{marker.size}\"\n # Name conflict\n with pytest.raises(NameError) as err_msg:\n fig = px.scatter(df, x=\"z\", y=df2.money, size=\"y\")\n assert \"A name conflict was encountered for argument y\" in str(err_msg.value)\n with pytest.raises(NameError) as err_msg:\n fig = px.scatter(df, x=\"z\", y=df2.money, size=df.y)\n assert \"A name conflict was encountered for argument y\" in str(err_msg.value)\n\n # No conflict when the dataframe is not given, fields are used\n df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))\n df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))\n fig = px.scatter(x=df.y, y=df2.y)\n assert np.all(fig.data[0].x == 
np.array([3, 4]))\n assert np.all(fig.data[0].y == np.array([23, 24]))\n assert fig.data[0].hovertemplate == \"x=%{x}<br>y=%{y}\"\n\n df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))\n df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))\n df3 = pd.DataFrame(dict(y=[0.1, 0.2]))\n fig = px.scatter(x=df.y, y=df2.y, size=df3.y)\n assert np.all(fig.data[0].x == np.array([3, 4]))\n assert np.all(fig.data[0].y == np.array([23, 24]))\n assert fig.data[0].hovertemplate == \"x=%{x}<br>y=%{y}<br>size=%{marker.size}\"\n\n df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))\n df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))\n df3 = pd.DataFrame(dict(y=[0.1, 0.2]))\n fig = px.scatter(x=df.y, y=df2.y, hover_data=[df3.y])\n assert np.all(fig.data[0].x == np.array([3, 4]))\n assert np.all(fig.data[0].y == np.array([23, 24]))\n assert (\n fig.data[0].hovertemplate == \"x=%{x}<br>y=%{y}<br>hover_data_0=%{customdata[0]}\"\n )\n\n\ndef test_name_heuristics():\n df = pd.DataFrame(dict(x=[0, 1], y=[3, 4], z=[0.1, 0.2]))\n fig = px.scatter(df, x=df.y, y=df.x, size=df.y)\n assert np.all(fig.data[0].x == np.array([3, 4]))\n assert np.all(fig.data[0].y == np.array([0, 1]))\n assert fig.data[0].hovertemplate == \"y=%{marker.size}<br>x=%{y}\"\n\n\ndef test_repeated_name():\n iris = px.data.iris()\n fig = px.scatter(\n iris,\n x=\"sepal_width\",\n y=\"sepal_length\",\n hover_data=[\"petal_length\", \"petal_width\", \"species_id\"],\n custom_data=[\"species_id\", \"species\"],\n )\n assert fig.data[0].customdata.shape[1] == 4\n\n\ndef test_arrayattrable_numpy():\n tips = px.data.tips()\n fig = px.scatter(\n tips, x=\"total_bill\", y=\"tip\", hover_data=[np.random.random(tips.shape[0])]\n )\n assert (\n fig.data[0][\"hovertemplate\"]\n == \"total_bill=%{x}<br>tip=%{y}<br>hover_data_0=%{customdata[0]}\"\n )\n tips = px.data.tips()\n fig = px.scatter(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n hover_data=[np.random.random(tips.shape[0])],\n labels={\"hover_data_0\": \"suppl\"},\n )\n assert (\n fig.data[0][\"hovertemplate\"]\n == \"total_bill=%{x}<br>tip=%{y}<br>suppl=%{customdata[0]}\"\n )\n\n\ndef test_wrong_column_name():\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(px.data.tips(), x=\"bla\", y=\"wrong\")\n assert \"Value of 'x' is not the name of a column in 'data_frame'\" in str(\n err_msg.value\n )\n\n\ndef test_missing_data_frame():\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(x=\"arg1\", y=\"arg2\")\n assert \"String or int arguments are only possible\" in str(err_msg.value)\n\n\ndef test_wrong_dimensions_of_array():\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(x=[1, 2, 3], y=[2, 3, 4, 5])\n assert \"All arguments should have the same length.\" in str(err_msg.value)\n\n\ndef test_wrong_dimensions_mixed_case():\n with pytest.raises(ValueError) as err_msg:\n df = pd.DataFrame(dict(time=[1, 2, 3], temperature=[20, 30, 25]))\n fig = px.scatter(df, x=\"time\", y=\"temperature\", color=[1, 3, 9, 5])\n assert \"All arguments should have the same length.\" in str(err_msg.value)\n\n\ndef test_wrong_dimensions():\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(px.data.tips(), x=\"tip\", y=[1, 2, 3])\n assert \"All arguments should have the same length.\" in str(err_msg.value)\n # the order matters\n with pytest.raises(ValueError) as err_msg:\n fig = px.scatter(px.data.tips(), x=[1, 2, 3], y=\"tip\")\n assert \"All arguments should have the same length.\" in str(err_msg.value)\n with pytest.raises(ValueError):\n fig = px.scatter(px.data.tips(), x=px.data.iris().index, 
y=\"tip\")\n # assert \"All arguments should have the same length.\" in str(err_msg.value)\n\n\ndef test_multiindex_raise_error():\n index = pd.MultiIndex.from_product(\n [[1, 2, 3], [\"a\", \"b\"]], names=[\"first\", \"second\"]\n )\n df = pd.DataFrame(np.random.random((6, 3)), index=index, columns=[\"A\", \"B\", \"C\"])\n # This is ok\n fig = px.scatter(df, x=\"A\", y=\"B\")\n with pytest.raises(TypeError) as err_msg:\n fig = px.scatter(df, x=df.index, y=\"B\")\n assert \"pandas MultiIndex is not supported by plotly express\" in str(\n err_msg.value\n )\n\n\ndef test_build_df_from_lists():\n # Just lists\n args = dict(x=[1, 2, 3], y=[2, 3, 4], color=[1, 3, 9])\n output = {key: key for key in args}\n df = pd.DataFrame(args)\n args[\"data_frame\"] = None\n out = build_dataframe(args, all_attrables, array_attrables)\n assert_frame_equal(df.sort_index(axis=1), out[\"data_frame\"].sort_index(axis=1))\n out.pop(\"data_frame\")\n assert out == output\n\n # Arrays\n args = dict(x=np.array([1, 2, 3]), y=np.array([2, 3, 4]), color=[1, 3, 9])\n output = {key: key for key in args}\n df = pd.DataFrame(args)\n args[\"data_frame\"] = None\n out = build_dataframe(args, all_attrables, array_attrables)\n assert_frame_equal(df.sort_index(axis=1), out[\"data_frame\"].sort_index(axis=1))\n out.pop(\"data_frame\")\n assert out == output\n\n\ndef test_build_df_with_index():\n tips = px.data.tips()\n args = dict(data_frame=tips, x=tips.index, y=\"total_bill\")\n out = build_dataframe(args, all_attrables, array_attrables)\n assert_frame_equal(tips.reset_index()[out[\"data_frame\"].columns], out[\"data_frame\"])\n\n\ndef test_splom_case():\n iris = px.data.iris()\n fig = px.scatter_matrix(iris)\n assert len(fig.data[0].dimensions) == len(iris.columns)\n dic = {\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n fig = px.scatter_matrix(dic)\n assert np.all(fig.data[0].dimensions[0].values == np.array(dic[\"a\"]))\n ar = np.arange(9).reshape((3, 3))\n fig = px.scatter_matrix(ar)\n assert np.all(fig.data[0].dimensions[0].values == ar[:, 0])\n\n\ndef test_int_col_names():\n # DataFrame with int column names\n lengths = pd.DataFrame(np.random.random(100))\n fig = px.histogram(lengths, x=0)\n assert np.all(np.array(lengths).flatten() == fig.data[0].x)\n # Numpy array\n ar = np.arange(100).reshape((10, 10))\n fig = px.scatter(ar, x=2, y=8)\n assert np.all(fig.data[0].x == ar[:, 2])\n\n\ndef test_data_frame_from_dict():\n fig = px.scatter({\"time\": [0, 1], \"money\": [1, 2]}, x=\"time\", y=\"money\")\n assert fig.data[0].hovertemplate == \"time=%{x}<br>money=%{y}\"\n assert np.all(fig.data[0].x == [0, 1])\n\n\ndef test_arguments_not_modified():\n iris = px.data.iris()\n petal_length = iris.petal_length\n hover_data = [iris.sepal_length]\n fig = px.scatter(iris, x=petal_length, y=\"petal_width\", hover_data=hover_data)\n assert iris.petal_length.equals(petal_length)\n assert iris.sepal_length.equals(hover_data[0])\n\n\ndef test_pass_df_columns():\n tips = px.data.tips()\n fig = px.histogram(\n tips,\n x=\"total_bill\",\n y=\"tip\",\n color=\"sex\",\n marginal=\"rug\",\n hover_data=tips.columns,\n )\n assert fig.data[1].hovertemplate.count(\"customdata\") == len(tips.columns)\n tips_copy = px.data.tips()\n assert tips_copy.columns.equals(tips.columns)\n\n\ndef test_size_column():\n df = px.data.tips()\n fig = px.scatter(df, x=df[\"size\"], y=df.tip)\n assert fig.data[0].hovertemplate == \"size=%{x}<br>tip=%{y}\"\n" ]
[ [ "numpy.random.random", "numpy.arange", "pandas.DataFrame", "numpy.all", "pandas.MultiIndex.from_product", "numpy.array" ] ]
uberdeveloper/fastbt
[ "0a056e269b9ce65a282bfb764e3b0de2245739f0" ]
[ "tests/test_metrics.py" ]
[ "import unittest\nimport pandas as pd\n\nfrom fastbt.metrics import *\n\n\nclass TestSpread(unittest.TestCase):\n def setUp(self):\n dates = pd.date_range(\"2016-01-01\", \"2019-12-31\")\n s = pd.Series(index=dates)\n s.loc[:] = 1\n self.s = s\n\n def test_default(self):\n s = self.s.copy()\n df = spread_test(s)\n answer = pd.DataFrame(\n {\n \"num_profit\": [4, 16, 48],\n \"profit\": [1461.0, 1461.0, 1461.0],\n \"num_loss\": [0, 0, 0],\n \"loss\": [0.0, 0.0, 0.0],\n },\n index=[\"Y\", \"Q\", \"M\"],\n )\n assert answer.equals(df)\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "pandas.date_range" ] ]
aburnap/DS2016_DesignFreedom_and_BrandRecognition
[ "a2cdf9d39d795d3f0a4361210d0249cb35f0350a" ]
[ "code/l1_logit_brand_recognition/l1_logit_brand_recognition.py" ]
[ "\nimport time\nimport numpy as np\nfrom sklearn import linear_model, cross_validation, preprocessing, svm\nfrom sklearn.grid_search import GridSearchCV\n\nDESIGNS = [elm+str(num) for elm in ['a','b','c','l'] for num in range(0,5)]\nmorphed_DESIGNS = [elm+str(num) for elm in ['A','B','C','L'] for num in range(0,8)]\nDESIGNS.extend(morphed_DESIGNS)\n\nATTRIBUTES = ['Active', 'Aggressive','Distinctive','Expressive','Innovative','Luxurious','Powerful','Sporty','Well Proportioned','Youthful']\n\nfeatures = np.zeros( [len(DESIGNS) ,len(ATTRIBUTES)])\n\nfor ind, attr in enumerate(ATTRIBUTES):\n features[:, ind] = np.loadtxt(\"../../data/processed_data/attribute_values/\"+attr+\"_full_rank.csv\", delimiter=',')\n\nbaseline_features = features[0:20, :]\n\nscaled_baseline_features = preprocessing.scale(baseline_features, axis=0)\n\nnum_data, num_features = np.shape(baseline_features)\n\na=[0]*5\nb=[1]*5\nc=[2]*5\nl=[3]*5\n\na.extend(b)\na.extend(c)\na.extend(l)\n\nbrand_labels = np.array(a)\n\n#---------------- Classifier Setup -------------------------------------------\nclf_dict = { 0: 'Log Reg L1',\n 1: 'Log Reg L2'}#,\n# 3: 'SGD w/ ElasticNet',\n# 4: 'Linear SVM'}\n# 4: 'Decision Tree',\n #5: 'Random Forest',\n #6: 'Linear SVM'}\n\nnum_classifiers = len(clf_dict)\nclf_array = np.empty((num_classifiers, ), dtype=object)\nnum_jobs = 1 # 12 if running on Foveros\n\n#---------- L1 Logistic Regression -----------------------------------\nlr_l1_tuned_parameters = [{'C': [0.01, 0.1, 1.0, 10, 100, 1000]}]\nlr_l1_clf = GridSearchCV(estimator=linear_model.LogisticRegression(penalty='l1'),\n param_grid=lr_l1_tuned_parameters, cv=3,\n scoring='accuracy', refit=True, n_jobs=num_jobs)\n\n#---------- L2 Logistic Regression -----------------------------------\nlr_l2_tuned_parameters = [{'C': [0.01, 0.1, 1.0, 10, 100, 1000]}]\nlr_l2_clf = GridSearchCV(estimator=linear_model.LogisticRegression(penalty='l2'),\n param_grid=lr_l2_tuned_parameters, cv=3,\n scoring='accuracy', refit=True, n_jobs=num_jobs)\n\nhuber_tuned_parameters = [{'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]}]\nhuber_clf = GridSearchCV(estimator=linear_model.SGDClassifier(penalty='l2', loss='huber'),\n param_grid = huber_tuned_parameters, cv=3, scoring='accuracy',\n refit=True, n_jobs=1)\n\n#---------- Linear SVM -----------------------------------------------\nsvm_tuned_parameters = [{'C': [0.01, 0.1, 1.0, 10, 100, 1000]}]\nsvm_clf = GridSearchCV(estimator=svm.LinearSVC(loss='l2'), #dual=False\n param_grid = svm_tuned_parameters, cv=3, scoring='accuracy',\n refit=True, n_jobs=num_jobs)\n\n\nclf_array[0] = lr_l1_clf\nclf_array[1] = lr_l2_clf\n#clf_array[2] = huber_clf\n#clf_array[3] = svm_clf\n\nnum_experiments = 3\ntest_size_per_split = 0.2\nseed = 0\n#---------------- Define Error and Time Matrices -----------------------------\nclf_error_matrix = np.empty((num_classifiers, num_experiments))\nclf_time_matrix = np.empty((num_classifiers, num_experiments))\n\n#---------------- Split Dataset into Train/ Test -------------------------\ncv_iterator = cross_validation.ShuffleSplit(num_data, \n n_iter=num_experiments, test_size=test_size_per_split, \n random_state=seed)\n\n#---------------- Begin Multiple Experiments for Each Classifier ---------\nfor exp_index, (train_index, test_index) in enumerate(cv_iterator):\n\n train_x = baseline_features[train_index, :]\n train_t = brand_labels[train_index]\n test_x = baseline_features[test_index, :]\n test_t = brand_labels[test_index]\n\n #---------- Train, Test, and Time 
------------------------------------\n for clf_index, clf in enumerate(clf_array):\n# if reg_index ==4:\n# continue\n start_time = time.time()\n clf.fit(train_x, train_t)\n clf_time_matrix[clf_index, exp_index] = time.time() - start_time\n clf_error_matrix[clf_index, exp_index] = clf.score(test_x, test_t)\n\ntrain_t\n\ntrain_x.shape\n\n#train_x\n\ntest_t\n\nclf_error_matrix\n\nclf_array[0].score(train_x,train_t)\n\nnp.mean(clf_error_matrix[0])\n\nclf_array[0].score(test_x,test_t)\n\nclf_time_matrix\n\nomega=clf_array[0].best_estimator_.coef_\n\nnp.savetxt(\"omega_30percent_brand_recognition.csv\", omega, delimiter=',')\n\nomega\n\nclf_array[0].best_estimator_.coef_\n\n\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "sklearn.linear_model.SGDClassifier", "sklearn.cross_validation.ShuffleSplit", "numpy.loadtxt", "numpy.shape", "numpy.mean", "sklearn.svm.LinearSVC", "numpy.savetxt", "sklearn.preprocessing.scale", "numpy.array", "numpy.empty" ] ]
switiz/las.pytorch
[ "357f602f05e3dbde84f8cc37f97dabd7dc6397fe" ]
[ "model/las.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model.encoder import Listener\nfrom model.decoder import Speller\n\nclass ListenAttendSpell(nn.Module):\n def __init__(self, listener, speller):\n super(ListenAttendSpell, self).__init__()\n self.listener = listener\n self.speller = speller\n\n def forward(self, inputs, inputs_length, targets, teacher_forcing_ratio=0.9, use_beam=False, beam_size=3):\n encoder_outputs = self.listener(inputs, inputs_length)\n decoder_outputs = self.speller(encoder_outputs, targets, teacher_forcing_ratio, use_beam, beam_size)\n\n return decoder_outputs\n\n def greedy_search(self, inputs, inputs_length):\n with torch.no_grad():\n outputs = self.forward(inputs, inputs_length, None, 0.0)\n\n return outputs.max(-1)[1]\n" ]
[ [ "torch.no_grad" ] ]
abcdvzz/pytorch-image-models
[ "aebc5b58c90adef48bdeef681f3f5f3d3936e1a0" ]
[ "timm/models/twins.py" ]
[ "\"\"\" Twins\nA PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`\n - https://arxiv.org/pdf/2104.13840.pdf\n\nCode/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below\n\n\"\"\"\n# --------------------------------------------------------\n# Twins\n# Copyright (c) 2021 Meituan\n# Licensed under The Apache 2.0 License [see LICENSE for details]\n# Written by Xinjie Li, Xiangxiang Chu\n# --------------------------------------------------------\nimport math\nfrom copy import deepcopy\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom functools import partial\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .layers import Mlp, DropPath, to_2tuple, trunc_normal_\nfrom .registry import register_model\nfrom .vision_transformer import Attention\nfrom .helpers import build_model_with_cfg, overlay_external_default_cfg\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n 'twins_pcpvt_small': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth',\n ),\n 'twins_pcpvt_base': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth',\n ),\n 'twins_pcpvt_large': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth',\n ),\n 'twins_svt_small': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth',\n ),\n 'twins_svt_base': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth',\n ),\n 'twins_svt_large': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth',\n ),\n}\n\nSize_ = Tuple[int, int]\n\n\nclass LocallyGroupedAttn(nn.Module):\n \"\"\" LSA: self attention within a group\n \"\"\"\n def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):\n assert ws != 1\n super(LocallyGroupedAttn, self).__init__()\n assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n self.dim = dim\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=True)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.ws = ws\n\n def forward(self, x, size: Size_):\n # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for\n # both. You can choose any one, we recommend forward_padding because it's neat. 
However,\n # the masking implementation is more reasonable and accurate.\n B, N, C = x.shape\n H, W = size\n x = x.view(B, H, W, C)\n pad_l = pad_t = 0\n pad_r = (self.ws - W % self.ws) % self.ws\n pad_b = (self.ws - H % self.ws) % self.ws\n x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n _, Hp, Wp, _ = x.shape\n _h, _w = Hp // self.ws, Wp // self.ws\n x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)\n qkv = self.qkv(x).reshape(\n B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)\n q, k, v = qkv[0], qkv[1], qkv[2]\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)\n x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)\n if pad_r > 0 or pad_b > 0:\n x = x[:, :H, :W, :].contiguous()\n x = x.reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n # def forward_mask(self, x, size: Size_):\n # B, N, C = x.shape\n # H, W = size\n # x = x.view(B, H, W, C)\n # pad_l = pad_t = 0\n # pad_r = (self.ws - W % self.ws) % self.ws\n # pad_b = (self.ws - H % self.ws) % self.ws\n # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n # _, Hp, Wp, _ = x.shape\n # _h, _w = Hp // self.ws, Wp // self.ws\n # mask = torch.zeros((1, Hp, Wp), device=x.device)\n # mask[:, -pad_b:, :].fill_(1)\n # mask[:, :, -pad_r:].fill_(1)\n #\n # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C\n # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)\n # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws\n # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))\n # qkv = self.qkv(x).reshape(\n # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)\n # # n_h, B, _w*_h, nhead, ws*ws, dim\n # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head\n # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws\n # attn = attn + attn_mask.unsqueeze(2)\n # attn = attn.softmax(dim=-1)\n # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head\n # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)\n # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)\n # if pad_r > 0 or pad_b > 0:\n # x = x[:, :H, :W, :].contiguous()\n # x = x.reshape(B, N, C)\n # x = self.proj(x)\n # x = self.proj_drop(x)\n # return x\n\n\nclass GlobalSubSampleAttn(nn.Module):\n \"\"\" GSA: using a key to summarize the information for a group to be efficient.\n \"\"\"\n def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):\n super().__init__()\n assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n self.dim = dim\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.q = nn.Linear(dim, dim, bias=True)\n self.kv = nn.Linear(dim, dim * 2, bias=True)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.sr_ratio = sr_ratio\n if sr_ratio > 1:\n self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)\n self.norm = nn.LayerNorm(dim)\n else:\n self.sr = None\n self.norm = None\n\n def forward(self, x, size: Size_):\n B, N, C = x.shape\n q = self.q(x).reshape(B, N, 
self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n if self.sr is not None:\n x = x.permute(0, 2, 1).reshape(B, C, *size)\n x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)\n x = self.norm(x)\n kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n k, v = kv[0], kv[1]\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n\n return x\n\n\nclass Block(nn.Module):\n\n def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None):\n super().__init__()\n self.norm1 = norm_layer(dim)\n if ws is None:\n self.attn = Attention(dim, num_heads, False, None, attn_drop, drop)\n elif ws == 1:\n self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio)\n else:\n self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws)\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, size: Size_):\n x = x + self.drop_path(self.attn(self.norm1(x), size))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass PosConv(nn.Module):\n # PEG from https://arxiv.org/abs/2102.10882\n def __init__(self, in_chans, embed_dim=768, stride=1):\n super(PosConv, self).__init__()\n self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), )\n self.stride = stride\n\n def forward(self, x, size: Size_):\n B, N, C = x.shape\n cnn_feat_token = x.transpose(1, 2).view(B, C, *size)\n x = self.proj(cnn_feat_token)\n if self.stride == 1:\n x += cnn_feat_token\n x = x.flatten(2).transpose(1, 2)\n return x\n\n def no_weight_decay(self):\n return ['proj.%d.weight' % i for i in range(4)]\n\n\nclass PatchEmbed(nn.Module):\n \"\"\" Image to Patch Embedding\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n\n self.img_size = img_size\n self.patch_size = patch_size\n assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \\\n f\"img_size {img_size} should be divided by patch_size {patch_size}.\"\n self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]\n self.num_patches = self.H * self.W\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n self.norm = nn.LayerNorm(embed_dim)\n\n def forward(self, x) -> Tuple[torch.Tensor, Size_]:\n B, C, H, W = x.shape\n\n x = self.proj(x).flatten(2).transpose(1, 2)\n x = self.norm(x)\n out_size = (H // self.patch_size[0], W // self.patch_size[1])\n\n return x, out_size\n\n\nclass Twins(nn.Module):\n \"\"\" Twins Vision Transfomer (Revisiting Spatial Attention)\n\n Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git\n \"\"\"\n def __init__(\n self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512),\n num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None,\n block_cls=Block, F4=False, extra_norm=True, 
strides=(2, 2, 2), task='cls'):\n super().__init__()\n self.num_classes = num_classes\n self.depths = depths\n\n img_size = to_2tuple(img_size)\n prev_chs = in_chans\n self.patch_embeds = nn.ModuleList()\n self.pos_drops = nn.ModuleList()\n for i in range(len(depths)):\n self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))\n self.pos_drops.append(nn.Dropout(p=drop_rate))\n prev_chs = embed_dims[i]\n img_size = tuple(t // patch_size for t in img_size)\n patch_size = 2\n\n self.task = task\n if self.task == 'seg':\n self.F4=F4\n self.extra_norm = extra_norm\n self.strides = strides\n if self.extra_norm:\n self.norm_list = nn.ModuleList()\n for dim in embed_dims:\n self.norm_list.append(norm_layer(dim))\n\n if strides != (2, 2, 2):\n del self.patch_embeds\n self.patch_embeds = nn.ModuleList()\n s = 1\n for i in range(len(depths)):\n if i == 0:\n self.patch_embeds.append(PatchEmbed(img_size, patch_size, in_chans, embed_dims[i]))\n else:\n self.patch_embeds.append(\n PatchEmbed(img_size // patch_size // s, strides[i-1], embed_dims[i - 1], embed_dims[i]))\n s = s * strides[i-1]\n\n self.blocks = nn.ModuleList()\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n cur = 0\n for k in range(len(depths)):\n _block = nn.ModuleList([block_cls(\n dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate,\n attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k],\n ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])])\n self.blocks.append(_block)\n cur += depths[k]\n\n self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])\n\n self.norm = norm_layer(embed_dims[-1])\n\n # classification head\n self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()\n\n # init weights\n self.apply(self._init_weights)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return set(['pos_block.' 
+ n for n, p in self.pos_block.named_parameters()])\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n elif isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n\n def forward_features(self, x):\n if self.task == 'cls':\n B = x.shape[0]\n for i, (embed, drop, blocks, pos_blk) in enumerate(\n zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):\n x, size = embed(x)\n x = drop(x)\n for j, blk in enumerate(blocks):\n x = blk(x, size)\n if j == 0:\n x = pos_blk(x, size) # PEG here\n if i < len(self.depths) - 1:\n x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()\n x = self.norm(x)\n return x.mean(dim=1) # GAP here\n \n if self.task == 'seg':\n outputs = list()\n B = x.shape[0]\n for i, (embed, drop, blocks, pos_blk) in enumerate(\n zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):\n x, size = embed(x)\n x = drop(x)\n for j, blk in enumerate(blocks):\n x = blk(x, size)\n if j == 0:\n x = pos_blk(x, size) # PEG here\n if self.extra_norm:\n x = self.norm_list[i](x)\n x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()\n outputs.append(x)\n return outputs\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.task == 'cls':\n return self.head(x)\n if self.task == 'seg':\n if self.F4:\n x = x[3:4] \n return x\n\n\ndef _create_twins(variant, pretrained=False, **kwargs):\n if kwargs.get('features_only', None):\n raise RuntimeError('features_only not implemented for Vision Transformer models.')\n if kwargs.get('task', 'cls') not in ['cls', 'seg']:\n raise RuntimeError('twins in timm only supports \"cls\" and \"seg\" task now.')\n\n model = build_model_with_cfg(\n Twins, variant, pretrained,\n pretrained_strict=False,\n default_cfg=default_cfgs[variant],\n **kwargs)\n return model\n\n\n@register_model\ndef twins_pcpvt_small(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef twins_pcpvt_base(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef twins_pcpvt_large(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef twins_svt_small(pretrained=False, 
**kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],\n depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef twins_svt_base(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],\n depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef twins_svt_large(pretrained=False, **kwargs):\n model_kwargs = dict(\n patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],\n depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs)\n return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.Identity", "torch.nn.functional.pad" ] ]
ben0110/frustum-pointnets_RSC_RADAR_fil_PC_batch_para
[ "a796fd4a775179c4daa342872ea1bbc5ba5d5026" ]
[ "train/provider_dfd_test.py" ]
[ "''' Provider class and helper functions for Frustum PointNets.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\n#import cPickle as pickle\n#import pcl\nimport sys\nimport csv\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\nfrom box_util import box3d_iou\n#from model_util import g_type2class, g_class2type, g_type2onehotclass\n#from model_util import g_type_mean_size\n#from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER\nfrom dataset import KittiDataset\nfrom collections import Counter\nimport kitti_utils\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\n\n\ndef rotate_pc_along_y(pc, rot_angle):\n '''\n Input:\n pc: numpy array (N,C), first 3 channels are XYZ\n z is facing forward, x is left ward, y is downward\n rot_angle: rad scalar\n Output:\n pc: updated pc with XYZ rotated\n '''\n cosval = np.cos(rot_angle)\n sinval = np.sin(rot_angle)\n rotmat = np.array([[cosval, -sinval], [sinval, cosval]])\n pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))\n return pc\n\n\ndef angle2class(angle, num_class):\n ''' Convert continuous angle to discrete class and residual.\n\n Input:\n angle: rad scalar, from 0-2pi (or -pi~pi), class center at\n 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)\n num_class: int scalar, number of classes N\n Output:\n class_id, int, among 0,1,...,N-1\n residual_angle: float, a number such that\n class*(2pi/N) + residual_angle = angle\n '''\n angle = angle % (2 * np.pi)\n assert (angle >= 0 and angle <= 2 * np.pi)\n angle_per_class = 2 * np.pi / float(num_class)\n shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)\n class_id = int(shifted_angle / angle_per_class)\n residual_angle = shifted_angle - \\\n (class_id * angle_per_class + angle_per_class / 2)\n return class_id, residual_angle\n\n\ndef class2angle(pred_cls, residual, num_class, to_label_format=True):\n ''' Inverse function to angle2class.\n If to_label_format, adjust angle to the range as in labels.\n '''\n angle_per_class = 2 * np.pi / float(num_class)\n angle_center = pred_cls * angle_per_class\n angle = angle_center + residual\n if to_label_format and angle > np.pi:\n angle = angle - 2 * np.pi\n return angle\n\n\ndef size2class(size, type_name):\n ''' Convert 3D bounding box size to template class and residuals.\n todo (rqi): support multiple size clusters per type.\n\n Input:\n size: numpy array of shape (3,) for (l,w,h)\n type_name: string\n Output:\n size_class: int scalar\n size_residual: numpy array of shape (3,)\n '''\n size_class = g_type2class[type_name]\n size_residual = size - g_type_mean_size[type_name]\n return size_class, size_residual\n\n\ndef class2size(pred_cls, residual):\n ''' Inverse function to size2class. 
'''\n mean_size = g_type_mean_size[g_class2type[pred_cls]]\n return mean_size + residual\n\"\"\"\ndef get_closest_radar_point(center,input_radar):\n cloud = pcl.PointCloud()\n cloud.from_array(input_radar[:,0:3])\n center_pc = pcl.PoinCloud()\n center_pc.from_array(center)\n kdtree = cloud\n [ind,sqdist] = kdtree.nearst_k_search_for_cloud(center_pc,0)\n closest_radar_point=np.array([cloud[ind[0][0]][0],cloud[ind[0][0]][1],cloud[ind[0][0]][2]])\n\"\"\"\ndef get_radar_mask(input,input_radar):\n radar_mask = np.zeros((input.shape[0]), dtype=np.float32)\n gt_boxes3d = np.zeros((len(input_radar), 7), dtype=np.float32)\n for k in range(len(input_radar)):\n gt_boxes3d[k,0:3]= input_radar[k,0:3]\n gt_boxes3d[k, 3]= 4.0\n gt_boxes3d[k, 4]= 4.0\n gt_boxes3d[k, 5]= 4.0 # np.tan(45*np.pi/180)*input_radar[k,2]*2\n gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=False)\n for k in range(len(gt_corners)):\n box_corners = gt_corners[k]\n fg_pt_flag = kitti_utils.in_hull(input[:,0:3], box_corners)\n radar_mask[fg_pt_flag] = 1.0\n return radar_mask,gt_boxes3d[k, 5]\n\n\n\n\n\nclass FrustumDataset(object):\n ''' Dataset class for Frustum PointNets training/evaluation.\n Load prepared KITTI data from pickled files, return individual data element\n [optional] along with its annotations.\n '''\n\n def __init__(self, npoints, split,\n random_flip=False, random_shift=False, rotate_to_center=False,\n overwritten_data_path=None, from_rgb_detection=False, one_hot=False):\n '''\n Input:\n npoints: int scalar, number of points for frustum point cloud.\n split: string, train or val\n random_flip: bool, in 50% randomly flip the point cloud\n in left and right (after the frustum rotation if any)\n random_shift: bool, if True randomly shift the point cloud\n back and forth by a random distance\n rotate_to_center: bool, whether to do frustum rotation\n overwritten_data_path: string, specify pickled file path.\n if None, use default path (with the split)\n from_rgb_detection: bool, if True we assume we do not have\n groundtruth, just return data elements.\n one_hot: bool, if True, return one hot vector\n '''\n self.dataset_kitti = KittiDataset(root_dir='/home/amben/frustum-pointnets_RSC/dataset/', mode='TRAIN', split=split)\n\n self.npoints = npoints\n self.random_flip = random_flip\n self.random_shift = random_shift\n self.rotate_to_center = rotate_to_center\n self.one_hot = one_hot\n if overwritten_data_path is None:\n overwritten_data_path = os.path.join(ROOT_DIR,\n 'kitti/frustum_carpedcyc_%s.pickle' % (split))\n\n self.from_rgb_detection = from_rgb_detection\n if from_rgb_detection:\n with open(overwritten_data_path, 'rb') as fp:\n self.id_list = pickle.load(fp)\n self.box2d_list = pickle.load(fp)\n self.input_list = pickle.load(fp)\n self.type_list = pickle.load(fp)\n # frustum_angle is clockwise angle from positive x-axis\n self.frustum_angle_list = pickle.load(fp)\n self.prob_list = pickle.load(fp)\n else:\n #with open(overwritten_data_path, 'rb') as fp:\n #load list of frames\n self.id_list = self.dataset_kitti.sample_id_list\n print(\"id_list\",len(self.id_list))\n #fil = np.zeros((len(self.id_list)))\n #for i in range(len(self.id_list)):\n # print(self.id_list[i])\n # gt_obj_list = self.dataset_kitti.filtrate_objects(self.dataset_kitti.get_label(self.id_list[i]))\n # print(len(gt_obj_list))\n #gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)\n #print(gt_boxes3d)\n # if(len(gt_obj_list)==1):\n # fil[i]=1\n\n #self.id_list= np.extract(fil,self.id_list)\n self.index_batch = []\n 
self.box_present = []\n self.radar_OI = []\n self.radar_mask_len = []\n self.cls_labels_len = []\n self.cls_labels_orig = []\n self.width_list=[]\n\n for i in range(len(self.id_list)):\n pc_input = self.dataset_kitti.get_lidar(self.id_list[i])\n pc_radar = self.dataset_kitti.get_radar(self.id_list[i])\n gt_obj_list = self.dataset_kitti.filtrate_objects(self.dataset_kitti.get_label(self.id_list[i]))\n\n gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)\n corners3d = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True)\n\n cls_label = np.zeros((pc_input.shape[0]), dtype=np.int32)\n gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True)\n for k in range(gt_boxes3d.shape[0]):\n box_corners = gt_corners[k]\n fg_pt_flag = kitti_utils.in_hull(pc_input[:, 0:3], box_corners)\n cls_label[fg_pt_flag] = k+1\n print(\"indice\",self.id_list[i] )\n print(\"number of boxes\",gt_boxes3d.shape[0])\n print(\"cls_label nubmer\", np.count_nonzero(cls_label > 0 ))\n print(\"number of radar pts present in frame\",len(pc_radar))\n for j in range(len(pc_radar)):\n radar_mask,width = get_radar_mask(pc_input, pc_radar[j].reshape(-1, 3))\n # check label present in radar ROI\n print(\"radar_mask\",np.count_nonzero(radar_mask== 1))\n label_radar_intersection = radar_mask*cls_label\n\n print(\"intersection\", np.count_nonzero(label_radar_intersection > 0))\n # if there's one\n labels_present=[]\n for m in range(gt_boxes3d.shape[0]):\n if(np.isin(m+1,label_radar_intersection)):\n labels_present.append(m+1)\n print(\"labels present\", labels_present)\n if (len(labels_present)==1):\n self.radar_mask_len.append(np.count_nonzero(radar_mask == 1))\n self.index_batch.append(self.id_list[i])\n self.radar_OI.append(j)\n self.box_present.append(labels_present[0])\n self.cls_labels_len.append(np.count_nonzero(label_radar_intersection > 0))\n self.cls_labels_orig.append(np.count_nonzero(cls_label == labels_present[0]))\n self.width_list.append(width)\n print(\"retained indices\",self.index_batch)\n print(\"len retained indices\", len(self.index_batch))\n with open('radar_batches_stats_'+split+'_static_4mw.csv',mode='w') as csv_file:\n fieldnames= ['index_batch','radar_mask_len','radar_OI','box_present','cls_labels_len','width']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for i in range(len(self.index_batch)):\n writer.writerow({'index_batch':self.index_batch[i],'radar_mask_len': self.radar_mask_len[i], 'radar_OI': self.radar_OI[i], 'box_present': self.box_present[i],'cls_labels_len':self.cls_labels_len[i],'width':self.width_list[i]})\n # keep this as a batch\n # if there's isn't\n # than forget about it\n\n\n #print(\"id_list_filtered\", len(self.id_list))\n \"\"\"\n self.input_list=[]\n self.box3d_list=[]\n self.label_list=[]\n self.type_list=[]\n self.heading_list=[]\n self.size_list=[]\n self.frustum_angle_list=[]\n for i in range(len(self.id_list)):\n\n #BOX3D_IN_CORNERS FORMAT\n gt_obj_list = dataset_kitti.filtrate_objects(dataset_kitti.get_label(self.id_list[i]))\n gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)\n print(gt_boxes3d)\n self.box3d_list.append(kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True))\n\n #INPUT_DATA_LIST\n input = dataset_kitti.get_lidar(self.id_list[i])\n self.input_list.append(input)\n\n #LABEL_LIST\n cls_label = np.zeros((self.input_list[i].shape[0]), dtype=np.int32)\n gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True)\n for k in range(gt_boxes3d.shape[0]):\n box_corners = gt_corners[k]\n 
fg_pt_flag = kitti_utils.in_hull(self.input_list[i][:,0:3], box_corners)\n cls_label[fg_pt_flag] = 1\n\n #print(cls_label.shape)\n print(\"cls_label\", (np.count_nonzero(cls_label == 1)))\n \n label_pts = np.ndarray((cls_label_count, 3))\n j = 0\n c = np.ndarray((len(input), 3))\n\n for i in range(len(input)):\n if (cls_label[i] == 1):\n c[i] = np.array([1.0, 0.0, 0.0])\n label_pts[j] = input[i,0:3]\n j = j + 1\n else:\n c[i] = np.array([0.0, 0.0, 1.0])\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(label_pts[:, 0], label_pts[:, 1], label_pts[:, 2])\n plt.show()\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter(input[:, 0], input[:, 1], input[:, 2], c=c, s=1)\n plt.show()\n \n self.label_list.append(cls_label)\n\n #TYPE_LIST\n self.type_list.append(\"Pedestrian\")\n #HEADING_LIST\n self.heading_list.append(gt_boxes3d[:,6])\n\n #SIZE_LIST l,w,h\n self.size_list.append(gt_boxes3d[:,3:6])\n #frustum_angle with 0.0 populate\n self.frustum_angle_list.append(0.0)\n \"\"\"\n # box2d in corners format\n #self.box2d_list = pickle.load(fp)\n # box3d in corners format\n #self.box3d_list = pickle.load(fp)\n # point cloud, hole or frustum filtered? looks like frustrum filtered because number of pc is too small\n #self.input_list = pickle.load(fp)\n # from frustrum point cloud which one belongs to label\n #self.label_list = pickle.load(fp)\n # for each 2d box/frustrum point cloud, detected object\n #self.type_list = pickle.load(fp)\n # rotation of 3d label box (ry)\n #self.heading_list = pickle.load(fp)\n # array of l,w,h\n #self.size_list = pickle.load(fp)\n # frustum_angle is clockwise angle from positive x-axis\n #self.frustum_angle_list = pickle.load(fp)\n\n\n def __len__(self):\n return len(self.id_list)\n\n def __getitem__(self, index):\n ''' Get index-th element from the picked file dataset. 
'''\n # ------------------------------ INPUTS ----------------------------\n #rot_angle = self.get_center_view_rot_angle(index)\n # load radar points\n input_radar = self.dataset_kitti.get_radar(self.id_list[index])\n input = self.dataset_kitti.get_lidar(self.id_list[index])\n radar_mask = get_radar_mask(input, input_radar)\n num_point_fil = np.count_nonzero(radar_mask == 1)\n radar_idx =np.argwhere(radar_mask==1)\n input = input[radar_idx.reshape(-1)]\n print(input.shape)\n\n pts_rect = input[:, 0:3]\n pts_intensity = input[:, 3:]\n if self.npoints < len(pts_rect):\n\n # print(len(pts_rect))\n print(pts_rect.shape)\n pts_depth = pts_rect[:, 2]\n pts_near_flag = pts_depth < 20.0\n far_idxs_choice = np.where(pts_near_flag == 0)[0]\n near_idxs = np.where(pts_near_flag == 1)[0]\n # print(len(pts_depth),len(far_idxs_choice),len(near_idxs),self.npoints, self.npoints - len(far_idxs_choice))\n near_idxs_choice = np.random.choice(near_idxs, self.npoints - len(far_idxs_choice), replace=False)\n\n choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n if len(far_idxs_choice) > 0 else near_idxs_choice\n np.random.shuffle(choice)\n else:\n if (self.npoints / 2) > len(pts_rect):\n diff = int(self.npoints / 2 - len(pts_rect))\n # print(diff)\n add_pts = np.zeros((diff, 3), dtype=np.float32)\n add_int = np.zeros((diff, 3), dtype=np.float32)\n # print(\"add_int\", add_int[0])\n pts_rect = np.concatenate((pts_rect, add_pts), axis=0)\n pts_intensity = np.concatenate((pts_intensity, add_int), axis=0)\n choice = np.arange(0, len(pts_rect), dtype=np.int32)\n if self.npoints > len(pts_rect):\n # print(len(pts_rect),self.npoints - len(pts_rect))\n extra_choice = np.random.choice(choice, self.npoints - len(pts_rect), replace=False)\n choice = np.concatenate((choice, extra_choice), axis=0)\n np.random.shuffle(choice)\n # print(len(pts_rect))\n ret_pts_rect = pts_rect[choice, :]\n # ret_pts_rect=pts_rect\n # TODO don't use intensity feature or try a method to add rgb\n # ret_pts_intensity = pts_intensity[choice] - 0.5 # translate intensity to [-0.5, 0.5]\n ret_pts_intensity = pts_intensity[choice]\n pts_features = [ret_pts_intensity.reshape(-1, 3)]\n ret_pts_features = np.concatenate(pts_features, axis=1) if pts_features.__len__() > 1 else pts_features[0]\n input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1)\n\n radar_mask = get_radar_mask(input,input_radar)\n\n\n gt_obj_list = self.dataset_kitti.filtrate_objects(self.dataset_kitti.get_label(self.id_list[index]))\n gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)\n corners3d = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True)\n\n cls_label = np.zeros((input.shape[0]), dtype=np.int32)\n gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, transform=True)\n for k in range(gt_boxes3d.shape[0]):\n box_corners = gt_corners[k]\n fg_pt_flag = kitti_utils.in_hull(input[:, 0:3], box_corners)\n cls_label[fg_pt_flag] = 1\n\n type=\"Pedestrian\"\n center = gt_boxes3d[:,0:3]\n #closest_radar_point = get_closest_radar_point(center,input_radar)\n heading = gt_boxes3d[:, 6]\n\n size=gt_boxes3d[:, 3:6]\n # frustum_angle with 0.0 populate\n frustum_angle=0.0\n\n\n rot_angle=0.0\n # Compute one hot vector\n if self.one_hot:\n cls_type = type\n assert (cls_type in ['Car', 'Pedestrian', 'Cyclist'])\n one_hot_vec = np.zeros((3))\n one_hot_vec[g_type2onehotclass[cls_type]] = 1\n\n # Get point cloud\n if self.rotate_to_center:\n point_set = self.get_center_view_point_set(input,frustum_angle)\n else:\n point_set = input\n # Resample\n\n 
#print(point_set.shape[0],self.npoints)\n #choice = np.random.choice(point_set.shape[0], self.npoints, replace=True)\n #print(len(choice))\n #point_set = point_set[choice, :]\n\n if self.from_rgb_detection:\n if self.one_hot:\n return point_set, rot_angle, self.prob_list[index], one_hot_vec\n else:\n return point_set, rot_angle, self.prob_list[index]\n\n # ------------------------------ LABELS ----------------------------\n seg = cls_label\n #seg = seg[choice]\n #print(\"batch seg 3asba:\", np.count_nonzero(seg == 1))\n\n # Get center point of 3D box\n if self.rotate_to_center:\n box3d_center = self.get_center_view_box3d_center(corners3d,frustum_angle)\n else:\n box3d_center = self.get_box3d_center(corners3d)\n\n # Heading\n if self.rotate_to_center:\n heading_angle = heading - rot_angle\n else:\n heading_angle = heading\n\n # Size\n size_class, size_residual = size2class(size,\n type)\n\n # Data Augmentation\n if self.random_flip:\n # note: rot_angle won't be correct if we have random_flip\n # so do not use it in case of random flipping.\n if np.random.random() > 0.5: # 50% chance flipping\n point_set[:, 0] *= -1\n box3d_center[0] *= -1\n heading_angle = np.pi - heading_angle\n if self.random_shift:\n dist = np.sqrt(np.sum(box3d_center[0] ** 2 + box3d_center[1] ** 2))\n shift = np.clip(np.random.randn() * dist * 0.05, dist * 0.8, dist * 1.2)\n point_set[:, 2] += shift\n box3d_center[2] += shift\n\n angle_class, angle_residual = angle2class(heading_angle,\n NUM_HEADING_BIN)\n #print(angle_class,angle_residual)\n if self.one_hot:\n return point_set, seg, box3d_center, angle_class, angle_residual, \\\n size_class, size_residual, rot_angle, one_hot_vec,radar_mask\n else:\n return point_set, seg, box3d_center, angle_class, angle_residual, \\\n size_class, size_residual, rot_angle,radar_mask\n\n def get_center_view_rot_angle(self, frustrum_angle):\n ''' Get the frustum rotation angle, it isshifted by pi/2 so that it\n can be directly used to adjust GT heading angle '''\n return 0.0 #np.pi / 2.0 + self.frustum_angle_list[index]\n\n def get_box3d_center(self, corners3d):\n ''' Get the center (XYZ) of 3D bounding box. '''\n corners3d= corners3d.reshape((8,3))\n box3d_center = (corners3d[0, :] + \\\n corners3d[6, :]) / 2.0\n return box3d_center\n\n def get_center_view_box3d_center(self, box3d,frustrum_angle):\n ''' Frustum rotation of 3D bounding box center. '''\n box3d= box3d.reshape((8,3))\n box3d_center = (box3d[0, :] + box3d[6, :]) / 2.0\n rotate_pc_along_y(np.expand_dims(box3d_center, 0), self.get_center_view_rot_angle(frustrum_angle)).squeeze()\n\n return rotate_pc_along_y(np.expand_dims(box3d_center, 0), self.get_center_view_rot_angle(frustrum_angle)).squeeze()\n\n def get_center_view_box3d(self, index):\n ''' Frustum rotation of 3D bounding box corners. 
'''\n box3d = self.box3d_list[index]\n box3d_center_view = np.copy(box3d)\n return rotate_pc_along_y(box3d_center_view, \\\n self.get_center_view_rot_angle(index))\n\n def get_center_view_point_set(self, input,frustrum_angle ):\n ''' Frustum rotation of point clouds.\n NxC points with first 3 channels as XYZ\n z is facing forward, x is left ward, y is downward\n '''\n # Use np.copy to avoid corrupting original data\n point_set = np.copy(input)\n return rotate_pc_along_y(point_set, \\\n self.get_center_view_rot_angle(frustrum_angle))\n\n\n# ----------------------------------\n# Helper functions for evaluation\n# ----------------------------------\n\ndef get_3d_box(box_size, heading_angle, center):\n ''' Calculate 3D bounding box corners from its parameterization.\n\n Input:\n box_size: tuple of (l,w,h)\n heading_angle: rad scalar, clockwise from pos x axis\n center: tuple of (x,y,z)\n Output:\n corners_3d: numpy array of shape (8,3) for 3D box cornders\n '''\n\n def roty(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n R = roty(heading_angle)\n l, w, h = box_size\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2];\n y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2];\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2];\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n corners_3d[0, :] = corners_3d[0, :] + center[0];\n corners_3d[1, :] = corners_3d[1, :] + center[1];\n corners_3d[2, :] = corners_3d[2, :] + center[2];\n corners_3d = np.transpose(corners_3d)\n return corners_3d\n\n\ndef compute_box3d_iou(center_pred,\n heading_logits, heading_residuals,\n size_logits, size_residuals,\n center_label,\n heading_class_label, heading_residual_label,\n size_class_label, size_residual_label):\n ''' Compute 3D bounding box IoU from network output and labels.\n All inputs are numpy arrays.\n\n Inputs:\n center_pred: (B,3)\n heading_logits: (B,NUM_HEADING_BIN)\n heading_residuals: (B,NUM_HEADING_BIN)\n size_logits: (B,NUM_SIZE_CLUSTER)\n size_residuals: (B,NUM_SIZE_CLUSTER,3)\n center_label: (B,3)\n heading_class_label: (B,)\n heading_residual_label: (B,)\n size_class_label: (B,)\n size_residual_label: (B,3)\n Output:\n iou2ds: (B,) birdeye view oriented 2d box ious\n iou3ds: (B,) 3d box ious\n '''\n batch_size = heading_logits.shape[0]\n heading_class = np.argmax(heading_logits, 1) # B\n heading_residual = np.array([heading_residuals[i, heading_class[i]] \\\n for i in range(batch_size)]) # B,\n size_class = np.argmax(size_logits, 1) # B\n size_residual = np.vstack([size_residuals[i, size_class[i], :] \\\n for i in range(batch_size)])\n\n iou2d_list = []\n iou3d_list = []\n for i in range(batch_size):\n heading_angle = class2angle(heading_class[i],\n heading_residual[i], NUM_HEADING_BIN)\n box_size = class2size(size_class[i], size_residual[i])\n corners_3d = get_3d_box(box_size, heading_angle, center_pred[i])\n\n heading_angle_label = class2angle(heading_class_label[i],\n heading_residual_label[i], NUM_HEADING_BIN)\n box_size_label = class2size(size_class_label[i], size_residual_label[i])\n corners_3d_label = get_3d_box(box_size_label,\n heading_angle_label, center_label[i])\n\n iou_3d, iou_2d = box3d_iou(corners_3d, corners_3d_label)\n iou3d_list.append(iou_3d)\n iou2d_list.append(iou_2d)\n return np.array(iou2d_list, dtype=np.float32), \\\n np.array(iou3d_list, dtype=np.float32)\n\n\ndef from_prediction_to_label_format(center, angle_class, angle_res, \\\n 
size_class, size_res, rot_angle):\n ''' Convert predicted box parameters to label format. '''\n l, w, h = class2size(size_class, size_res)\n ry = class2angle(angle_class, angle_res, NUM_HEADING_BIN) + rot_angle\n tx, ty, tz = rotate_pc_along_y(np.expand_dims(center, 0), -rot_angle).squeeze()\n ty += h / 2.0\n return h, w, l, tx, ty, tz, ry\n\n\nif __name__ == '__main__':\n import mayavi.mlab as mlab\n\n sys.path.append(os.path.join(ROOT_DIR, 'mayavi'))\n from viz_util import draw_lidar, draw_gt_boxes3d\n\n median_list = []\n dataset = FrustumDataset(1024, split='val',\n rotate_to_center=True, random_flip=True, random_shift=True)\n for i in range(len(dataset)):\n data = dataset[i]\n print(('Center: ', data[2], \\\n 'angle_class: ', data[3], 'angle_res:', data[4], \\\n 'size_class: ', data[5], 'size_residual:', data[6], \\\n 'real_size:', g_type_mean_size[g_class2type[data[5]]] + data[6]))\n print(('Frustum angle: ', dataset.frustum_angle_list[i]))\n median_list.append(np.median(data[0][:, 0]))\n print((data[2], dataset.box3d_list[i], median_list[-1]))\n box3d_from_label = get_3d_box(class2size(data[5], data[6]), class2angle(data[3], data[4], 12), data[2])\n\n ps = data[0]\n seg = data[1]\n fig = mlab.figure(figure=None, bgcolor=(0.4, 0.4, 0.4), fgcolor=None, engine=None, size=(1000, 500))\n mlab.points3d(ps[:, 0], ps[:, 1], ps[:, 2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)\n mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2, figure=fig)\n draw_gt_boxes3d([box3d_from_label], fig, color=(1, 0, 0))\n mlab.orientation_axes()\n raw_input()\n print(np.mean(np.abs(median_list)))\n" ]
[ [ "numpy.expand_dims", "numpy.concatenate", "numpy.random.randn", "numpy.where", "numpy.sin", "numpy.copy", "numpy.argmax", "numpy.count_nonzero", "numpy.zeros", "numpy.isin", "numpy.median", "numpy.transpose", "numpy.array", "numpy.sum", "numpy.random.random", "numpy.abs", "numpy.cos", "numpy.random.shuffle", "numpy.argwhere", "numpy.vstack" ] ]
labscript-suite-temp-archive/lyse-fork--rpanderson-lyse--forked-from--labscript_suite-lyse
[ "ca5bfe2f1f8dd98cc5b4505736a74d08d562e1fd" ]
[ "dataframe_utilities.py" ]
[ "#####################################################################\r\n# #\r\n# /dataframe_utilities.py #\r\n# #\r\n# Copyright 2013, Monash University #\r\n# #\r\n# This file is part of the program lyse, in the labscript suite #\r\n# (see http://labscriptsuite.org), and is licensed under the #\r\n# Simplified BSD License. See the license.txt file in the root of #\r\n# the project for the full license. #\r\n# #\r\n#####################################################################\r\n\r\nfrom __future__ import division, unicode_literals, print_function, absolute_import\r\nfrom labscript_utils import PY2\r\nif PY2:\r\n str = unicode\r\n \r\nimport labscript_utils.h5_lock, h5py\r\nimport pandas\r\nimport os\r\nfrom numpy import *\r\nimport tzlocal\r\nimport labscript_utils.shared_drive\r\nfrom labscript_utils.dict_diff import dict_diff\r\nfrom labscript_utils.connections import _ensure_str\r\nfrom labscript_utils.properties import get_attributes\r\nimport runmanager\r\n\r\n\r\ndef asdatetime(timestr):\r\n if isinstance(timestr, bytes):\r\n timestr = timestr.decode('utf-8')\r\n tz = tzlocal.get_localzone().zone\r\n return pandas.Timestamp(timestr, tz=tz)\r\n\r\ndef get_nested_dict_from_shot(filepath):\r\n row = runmanager.get_shot_globals(filepath)\r\n with h5py.File(filepath,'r') as h5_file:\r\n if 'results' in h5_file:\r\n for groupname in h5_file['results']:\r\n resultsgroup = h5_file['results'][groupname]\r\n row[groupname] = get_attributes(resultsgroup)\r\n if 'images' in h5_file:\r\n for orientation in h5_file['images'].keys():\r\n if isinstance(h5_file['images'][orientation], h5py.Group):\r\n row[orientation] = get_attributes(h5_file['images'][orientation])\r\n for label in h5_file['images'][orientation]:\r\n row[orientation][label] = {}\r\n group = h5_file['images'][orientation][label]\r\n for image in group:\r\n row[orientation][label][image] = {}\r\n for key, val in get_attributes(group[image]).items():\r\n if not isinstance(val, h5py.Reference):\r\n row[orientation][label][image][key] = val\r\n row['filepath'] = _ensure_str(filepath)\r\n row['agnostic_path'] = labscript_utils.shared_drive.path_to_agnostic(filepath)\r\n seq_id = _ensure_str(h5_file.attrs['sequence_id'])\r\n row['sequence'] = asdatetime(seq_id.split('_')[0])\r\n try:\r\n row['sequence_index'] = h5_file.attrs['sequence_index']\r\n except KeyError:\r\n row['sequence_index'] = None\r\n if 'script' in h5_file: \r\n row['labscript'] = _ensure_str(h5_file['script'].attrs['name'])\r\n try:\r\n row['run time'] = asdatetime(_ensure_str(h5_file.attrs['run time']))\r\n except KeyError:\r\n row['run time'] = float('nan')\r\n try: \r\n row['run number'] = h5_file.attrs['run number']\r\n except KeyError:\r\n row['run number'] = float('nan')\r\n try:\r\n row['run repeat'] = h5_file.attrs['run repeat']\r\n except KeyError:\r\n row['run repeat'] = 0\r\n return row\r\n \r\ndef flatten_dict(dictionary, keys=tuple()):\r\n \"\"\"Takes a nested dictionary whose keys are strings, and returns a\r\n flat dictionary whose keys are tuples of strings, each element of\r\n which is the key for one level of the hierarchy.\"\"\"\r\n result = {}\r\n for name in dictionary:\r\n if isinstance(dictionary[name],dict):\r\n flat = flatten_dict(dictionary[name],keys=keys + (name,))\r\n result.update(flat)\r\n else:\r\n result[keys + (name,)] = dictionary[name]\r\n return result\r\n \r\ndef flat_dict_to_hierarchical_dataframe(dictionary):\r\n \"\"\"Make all the keys tuples of the same length\"\"\"\r\n max_tuple_length = 2 # Must have at least two 
levels to make a MultiIndex\r\n for key in dictionary:\r\n max_tuple_length = max(max_tuple_length,len(key))\r\n result = {}\r\n for key in dictionary:\r\n newkey = key[:]\r\n while len(newkey) < max_tuple_length:\r\n newkey += ('',)\r\n result[newkey] = dictionary[key] \r\n index = pandas.MultiIndex.from_tuples(sorted(result.keys()))\r\n return pandas.DataFrame([result],columns=index) \r\n\r\ndef flat_dict_to_flat_series(dictionary):\r\n result = {}\r\n for key in dictionary:\r\n if len(key) > 1:\r\n result[key] = dictionary[key]\r\n else:\r\n result[key[0]] = dictionary[key]\r\n keys = list(result.keys())\r\n keys.sort(key = lambda item: \r\n (len(item),) + item if isinstance(item, tuple) else (1,item))\r\n return pandas.Series(result,index=keys) \r\n \r\ndef get_dataframe_from_shot(filepath):\r\n nested_dict = get_nested_dict_from_shot(filepath)\r\n flat_dict = flatten_dict(nested_dict)\r\n df = flat_dict_to_hierarchical_dataframe(flat_dict)\r\n return df\r\n \r\ndef get_dataframe_from_shots(filepaths):\r\n return concat_with_padding(*[get_dataframe_from_shot(filepath) for filepath in filepaths])\r\n\r\ndef get_series_from_shot(filepath):\r\n nested_dict = get_nested_dict_from_shot(filepath)\r\n flat_dict = flatten_dict(nested_dict)\r\n s = flat_dict_to_flat_series(flat_dict)\r\n return s\r\n \r\ndef pad_columns(df, n):\r\n \"\"\"Add depth to hiererchical column labels with empty strings\"\"\"\r\n if df.columns.nlevels == n:\r\n return df\r\n new_columns = []\r\n data = {}\r\n for column in df.columns:\r\n new_column = column + ('',)*(n-len(column))\r\n new_columns.append(new_column)\r\n data[new_column] = df[column]\r\n index = pandas.MultiIndex.from_tuples(new_columns)\r\n return pandas.DataFrame(data,columns = index)\r\n\r\ndef concat_with_padding(*dataframes):\r\n \"\"\"Concatenates dataframes with MultiIndex column labels,\r\n padding shallower hierarchies such that the MultiIndexes have\r\n the same nlevels.\"\"\"\r\n dataframes = list(dataframes)\r\n # Remove empty dataframes (these don't concat since pandas 0.18) \r\n dataframes = [df for df in dataframes if not df.empty]\r\n max_nlevels = max(df.columns.nlevels for df in dataframes)\r\n for i, df in enumerate(dataframes):\r\n if df.columns.nlevels < max_nlevels:\r\n dataframes[i] = pad_columns(df, max_nlevels)\r\n return pandas.concat(dataframes, ignore_index=True)\r\n \r\ndef replace_with_padding(df, row, index):\r\n if df.columns.nlevels < row.columns.nlevels:\r\n df = pad_columns(df, row.columns.nlevels)\r\n elif df.columns.nlevels > row.columns.nlevels:\r\n row = pad_columns(row, df.columns.nlevels)\r\n\r\n # Change the index of the row object to equal that of where it is to be\r\n # inserted:\r\n row.index = pandas.Int64Index([index])\r\n\r\n # Replace the target row in the dataframe by dropping, appending, then\r\n # sorting by index:\r\n df = df.drop([index])\r\n df = df.append(row)\r\n df = df.sort_index()\r\n return df\r\n \r\n\r\n \r\n" ]
[ [ "pandas.concat", "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.Int64Index", "pandas.Timestamp" ] ]
FINNGEN/hail
[ "03fabf5dad71415aeca641ef1618e5352639d683" ]
[ "benchmark-service/benchmark/benchmark.py" ]
[ "import asyncio\nimport os\nimport aiohttp\nfrom aiohttp import web\nimport logging\nfrom gear import setup_aiohttp_session, web_authenticated_developers_only\nfrom hailtop.config import get_deploy_config\nfrom hailtop.tls import internal_server_ssl_context\nfrom hailtop.hail_logging import AccessLogger, configure_logging\nfrom hailtop.utils import retry_long_running, collect_agen, humanize_timedelta_msecs\nfrom hailtop import aiotools\nimport hailtop.aiogoogle as aiogoogle\nimport hailtop.batch_client.aioclient as bc\nfrom web_common import setup_aiohttp_jinja2, setup_common_static_routes, render_template\nfrom benchmark.utils import (\n get_geometric_mean,\n parse_file_path,\n enumerate_list_of_trials,\n list_benchmark_files,\n round_if_defined,\n submit_test_batch,\n)\nimport json\nimport re\nimport plotly\nimport plotly.express as px\nfrom scipy.stats.mstats import gmean, hmean\nimport numpy as np\nimport pandas as pd\nimport gidgethub\nimport gidgethub.aiohttp\nfrom .config import START_POINT, BENCHMARK_RESULTS_PATH\n\nconfigure_logging()\nrouter = web.RouteTableDef()\nlogging.basicConfig(level=logging.DEBUG)\ndeploy_config = get_deploy_config()\nlog = logging.getLogger('benchmark')\n\nBENCHMARK_FILE_REGEX = re.compile(\n r'gs://((?P<bucket>[^/]+)/)((?P<user>[^/]+)/)((?P<instanceId>[^/]*)/)((?P<version>[^-]+)-)((?P<sha>[^-]+))(-(?P<tag>[^\\.]+))?\\.json'\n)\n\nGH_COMMIT_MESSAGE_REGEX = re.compile(r'(?P<title>.*)\\s\\(#(?P<pr_id>\\d+)\\)(?P<rest>.*)')\n\nBENCHMARK_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nbenchmark_data = {'commits': {}, 'dates': [], 'geo_means': [], 'pr_ids': [], 'shas': []}\n\n\nwith open(os.environ.get('HAIL_CI_OAUTH_TOKEN', 'oauth-token/oauth-token'), 'r') as f:\n oauth_token = f.read().strip()\n\n\nasync def get_benchmarks(app, file_path):\n log.info(f'get_benchmarks file_path={file_path}')\n fs: aiotools.AsyncFS = app['fs']\n try:\n json_data = (await fs.read(file_path)).decode('utf-8')\n pre_data = json.loads(json_data)\n except FileNotFoundError:\n message = f'could not find file, {file_path}'\n log.info('could not get blob: ' + message, exc_info=True)\n return None\n\n data = {}\n prod_of_means = 1\n for d in pre_data['benchmarks']:\n stats = dict()\n stats['name'] = d.get('name')\n stats['failed'] = d.get('failed')\n if not d['failed']:\n prod_of_means *= d.get('mean', 1)\n stats['f-stat'] = round_if_defined(d.get('f-stat'))\n stats['mean'] = round_if_defined(d.get('mean'))\n stats['median'] = round_if_defined(d.get('median'))\n stats['p-value'] = round_if_defined(d.get('p-value'))\n stats['stdev'] = round_if_defined(d.get('stdev'))\n stats['times'] = d.get('times')\n stats['trials'] = d.get('trials')\n data[stats['name']] = stats\n geometric_mean = get_geometric_mean(prod_of_means, len(pre_data['benchmarks']))\n\n file_info = parse_file_path(BENCHMARK_FILE_REGEX, file_path)\n sha = file_info['sha']\n benchmarks = dict()\n benchmarks['sha'] = sha\n benchmarks['geometric_mean'] = geometric_mean\n benchmarks['data'] = data\n return benchmarks\n\n\ndef get_comparisons(benchmarks1, benchmarks2, metric):\n def get_metric(data):\n if metric == 'median':\n return data.get('median')\n assert metric == 'best'\n times = data.get('times')\n if times:\n return min(times)\n return None\n\n d1_keys = set(benchmarks1['data'].keys())\n d2_keys = set(benchmarks2['data'].keys())\n set_of_names = d1_keys.union(d2_keys)\n\n comparisons = []\n for name in set_of_names:\n data1 = benchmarks1['data'].get(name)\n data2 = benchmarks2['data'].get(name)\n if data2 is 
None:\n comparisons.append((name, get_metric(data1), None))\n elif data1 is None:\n comparisons.append((name, None, get_metric(data2)))\n else:\n comparisons.append((name, get_metric(data1), get_metric(data2)))\n\n return comparisons\n\n\ndef fmt_time(t):\n if t is not None:\n return round(t, 3)\n return None\n\n\ndef fmt_diff(ratio):\n if ratio is not None:\n return round(ratio * 100, 3)\n return None\n\n\ndef final_comparisons(comparisons):\n comps = []\n ratios = []\n final_comps = {}\n for name, r1, r2 in comparisons:\n if r1 is None:\n comps.append((name, None, None, fmt_time(r2)))\n elif r2 is None:\n comps.append((name, None, fmt_time(r1), None))\n else:\n r = r1 / r2\n ratios.append(r)\n comps.append((name, fmt_diff(r), fmt_time(r1), fmt_time(r2)))\n final_comps['comps'] = comps\n if len(ratios) == 0:\n final_comps['harmonic_mean'] = None\n final_comps['geometric_mean'] = None\n final_comps['arithmetic_mean'] = None\n final_comps['median'] = None\n else:\n final_comps['harmonic_mean'] = fmt_diff(hmean(ratios))\n final_comps['geometric_mean'] = fmt_diff(gmean(ratios))\n final_comps['arithmetic_mean'] = fmt_diff(np.mean(ratios))\n final_comps['median'] = fmt_diff(np.median(ratios))\n return final_comps\n\n\[email protected]('/healthcheck')\nasync def healthcheck(request: web.Request) -> web.Response: # pylint: disable=unused-argument\n return web.Response()\n\n\[email protected]('/name/{name}')\n@web_authenticated_developers_only(redirect=False)\nasync def show_name(request: web.Request, userdata) -> web.Response: # pylint: disable=unused-argument\n file_path = request.query.get('file')\n benchmarks = await get_benchmarks(request.app, file_path)\n name_data = benchmarks['data'][str(request.match_info['name'])]\n\n try:\n data = enumerate_list_of_trials(name_data['trials'])\n d = {'trial': data['trial_indices'], 'wall_time': data['wall_times'], 'index': data['within_group_index']}\n df = pd.DataFrame(d)\n fig = px.scatter(df, x=df.trial, y=df.wall_time, hover_data=['index'])\n plot = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n except Exception as e:\n message = 'could not find name'\n log.info('name is of type NoneType: ' + message, exc_info=True)\n raise web.HTTPBadRequest(text=message) from e\n\n context = {'name': request.match_info.get('name', ''), 'plot': plot}\n\n return await render_template('benchmark', request, userdata, 'name.html', context)\n\n\[email protected]('/')\[email protected]('')\nasync def index(request):\n userdata = {}\n global benchmark_data\n d = {\n 'dates': benchmark_data['dates'],\n 'geo_means': benchmark_data['geo_means'],\n 'pr_ids': benchmark_data['pr_ids'],\n 'commits': benchmark_data['shas'],\n }\n assert len(d['dates']) == len(d['geo_means']), d\n df = pd.DataFrame(d)\n if not df.dates.empty:\n fig = px.line(df, x=df.dates, y=df.geo_means, hover_data=['pr_ids', 'commits'])\n fig.update_xaxes(rangeslider_visible=True)\n plot = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n else:\n plot = None\n context = {'commits': benchmark_data['commits'], 'plot': plot, 'benchmark_results_path': BENCHMARK_RESULTS_PATH}\n return await render_template('benchmark', request, userdata, 'index.html', context)\n\n\[email protected]('/lookup')\n@web_authenticated_developers_only(redirect=False)\nasync def lookup(request, userdata): # pylint: disable=unused-argument\n app = request.app\n file = request.query.get('file')\n if file is None:\n benchmarks_context = None\n else:\n benchmarks_context = await get_benchmarks(request.app, file)\n context = {\n 
'file': file,\n 'benchmarks': benchmarks_context,\n 'benchmark_file_list': await list_benchmark_files(app['fs']),\n }\n return await render_template('benchmark', request, userdata, 'lookup.html', context)\n\n\[email protected]('/compare')\n@web_authenticated_developers_only(redirect=False)\nasync def compare(request, userdata): # pylint: disable=unused-argument\n app = request.app\n file1 = request.query.get('file1')\n file2 = request.query.get('file2')\n metric = request.query.get('metrics')\n if file1 is None or file2 is None:\n benchmarks_context1 = None\n benchmarks_context2 = None\n comparisons = None\n else:\n benchmarks_context1 = await get_benchmarks(app, file1)\n benchmarks_context2 = await get_benchmarks(app, file2)\n comparisons = final_comparisons(get_comparisons(benchmarks_context1, benchmarks_context2, metric))\n context = {\n 'file1': file1,\n 'file2': file2,\n 'metric': metric,\n 'benchmarks1': benchmarks_context1,\n 'benchmarks2': benchmarks_context2,\n 'comparisons': comparisons,\n 'benchmark_file_list': await list_benchmark_files(app['fs']),\n }\n return await render_template('benchmark', request, userdata, 'compare.html', context)\n\n\[email protected]('/batches/{batch_id}')\n@web_authenticated_developers_only()\nasync def get_batch(request, userdata):\n batch_id = int(request.match_info['batch_id'])\n batch_client = request.app['batch_client']\n b = await batch_client.get_batch(batch_id)\n status = await b.last_known_status()\n jobs = await collect_agen(b.jobs())\n for j in jobs:\n j['duration'] = humanize_timedelta_msecs(j['duration'])\n page_context = {'batch': status, 'jobs': jobs}\n return await render_template('benchmark', request, userdata, 'batch.html', page_context)\n\n\[email protected]('/batches/{batch_id}/jobs/{job_id}')\n@web_authenticated_developers_only()\nasync def get_job(request, userdata):\n batch_id = int(request.match_info['batch_id'])\n job_id = int(request.match_info['job_id'])\n batch_client = request.app['batch_client']\n job = await batch_client.get_job(batch_id, job_id)\n page_context = {\n 'batch_id': batch_id,\n 'job_id': job_id,\n 'job_log': await job.log(),\n 'job_status': json.dumps(await job.status(), indent=2),\n 'attempts': await job.attempts(),\n }\n return await render_template('benchmark', request, userdata, 'job.html', page_context)\n\n\nasync def update_commits(app):\n global benchmark_data\n github_client = app['github_client']\n\n request_string = f'/repos/hail-is/hail/commits?since={START_POINT}'\n log.info(f'start point is {START_POINT}')\n gh_data = await github_client.getitem(request_string)\n log.info(f'gh_data length is {len(gh_data)}')\n\n for gh_commit in gh_data:\n sha = gh_commit.get('sha')\n log.info(f'for commit {sha}')\n await update_commit(app, sha)\n\n log.info('got new commits')\n\n\nasync def get_commit(app, sha): # pylint: disable=unused-argument\n log.info(f'get_commit sha={sha}')\n github_client = app['github_client']\n batch_client = app['batch_client']\n fs: aiotools.AsyncFS = app['fs']\n\n file_path = f'{BENCHMARK_RESULTS_PATH}/0-{sha}.json'\n request_string = f'/repos/hail-is/hail/commits/{sha}'\n gh_commit = await github_client.getitem(request_string)\n\n message = gh_commit['commit']['message']\n match = GH_COMMIT_MESSAGE_REGEX.search(message)\n message_dict = match.groupdict()\n pr_id = message_dict['pr_id']\n title = message_dict['title']\n\n has_results_file = await fs.exists(file_path)\n batch_statuses = [b._last_known_status async for b in batch_client.list_batches(q=f'sha={sha} user:benchmark')]\n 
complete_batch_statuses = [bs for bs in batch_statuses if bs['complete']]\n running_batch_statuses = [bs for bs in batch_statuses if not bs['complete']]\n\n if has_results_file:\n assert complete_batch_statuses, batch_statuses\n log.info(f'commit {sha} has a results file')\n status = complete_batch_statuses[0]\n batch_id = status['id']\n log.info(f'status of {sha}: {status}')\n elif running_batch_statuses:\n status = running_batch_statuses[0]\n batch_id = status['id']\n log.info(f'batch already exists for commit {sha}')\n else:\n status = None\n batch_id = None\n log.info(f'no batches or results file exists for {sha}')\n\n commit = {\n 'sha': sha,\n 'title': title,\n 'author': gh_commit['commit']['author']['name'],\n 'date': gh_commit['commit']['author']['date'],\n 'status': status,\n 'batch_id': batch_id,\n 'pr_id': pr_id,\n }\n\n return commit\n\n\nasync def update_commit(app, sha): # pylint: disable=unused-argument\n log.info('in update_commit')\n global benchmark_data\n fs: aiotools.AsyncFS = app['fs']\n commit = await get_commit(app, sha)\n file_path = f'{BENCHMARK_RESULTS_PATH}/0-{sha}.json'\n\n if commit['status'] is None:\n batch_client = app['batch_client']\n batch_id = await submit_test_batch(batch_client, sha)\n batch = await batch_client.get_batch(batch_id)\n commit['status'] = batch._last_known_status\n commit['batch_id'] = batch_id\n log.info(f'submitted a batch {batch_id} for commit {sha}')\n benchmark_data['commits'][sha] = commit\n return commit\n\n has_results_file = await fs.exists(file_path)\n if has_results_file and sha in benchmark_data['commits']:\n benchmarks = await get_benchmarks(app, file_path)\n commit['geo_mean'] = benchmarks['geometric_mean']\n geo_mean = commit['geo_mean']\n log.info(f'geo mean is {geo_mean}')\n\n benchmark_data['dates'].append(commit['date'])\n benchmark_data['geo_means'].append(commit['geo_mean'])\n benchmark_data['pr_ids'].append(commit['pr_id'])\n benchmark_data['shas'].append(sha)\n benchmark_data['commits'][sha] = commit\n return commit\n\n\[email protected]('/api/v1alpha/benchmark/commit/{sha}')\nasync def get_status(request): # pylint: disable=unused-argument\n sha = str(request.match_info['sha'])\n app = request.app\n commit = await get_commit(app, sha)\n return web.json_response(commit)\n\n\[email protected]('/api/v1alpha/benchmark/commit/{sha}')\nasync def delete_commit(request): # pylint: disable=unused-argument\n global benchmark_data\n app = request.app\n fs: aiotools.AsyncFS = app['fs']\n batch_client = app['batch_client']\n sha = str(request.match_info['sha'])\n file_path = f'{BENCHMARK_RESULTS_PATH}/0-{sha}.json'\n\n if await fs.exists(file_path):\n await fs.remove(file_path)\n log.info(f'deleted file for sha {sha}')\n\n async for b in batch_client.list_batches(q=f'sha={sha} user:benchmark'):\n await b.delete()\n log.info(f'deleted batch for sha {sha}')\n\n if benchmark_data['commits'].get(sha):\n del benchmark_data['commits'][sha]\n log.info(f'deleted commit {sha} from commit list')\n\n return web.Response()\n\n\[email protected]('/api/v1alpha/benchmark/commit/{sha}')\nasync def call_update_commit(request): # pylint: disable=unused-argument\n body = await request.json()\n sha = body['sha']\n log.info('call_update_commit')\n commit = await update_commit(request.app, sha)\n return web.json_response(commit)\n\n\nasync def github_polling_loop(app):\n while True:\n await update_commits(app)\n log.info('successfully queried github')\n await asyncio.sleep(600)\n\n\nasync def on_startup(app):\n credentials = 
aiogoogle.auth.Credentials.from_file('/benchmark-gsa-key/key.json')\n app['fs'] = aiogoogle.GoogleStorageAsyncFS(credentials=credentials)\n app['gh_client_session'] = aiohttp.ClientSession()\n app['github_client'] = gidgethub.aiohttp.GitHubAPI(\n app['gh_client_session'], 'hail-is/hail', oauth_token=oauth_token\n )\n app['batch_client'] = bc.BatchClient(billing_project='benchmark')\n app['task_manager'] = aiotools.BackgroundTaskManager()\n app['task_manager'].ensure_future(retry_long_running('github_polling_loop', github_polling_loop, app))\n\n\nasync def on_cleanup(app):\n try:\n await app['gh_client_session'].close()\n finally:\n try:\n await app['fs'].close()\n finally:\n app['task_manager'].shutdown()\n\n\ndef run():\n app = web.Application()\n setup_aiohttp_jinja2(app, 'benchmark')\n setup_aiohttp_session(app)\n\n setup_common_static_routes(router)\n router.static('/static', f'{BENCHMARK_ROOT}/static')\n app.add_routes(router)\n app.on_startup.append(on_startup)\n app.on_cleanup.append(on_cleanup)\n web.run_app(\n deploy_config.prefix_application(app, 'benchmark'),\n host='0.0.0.0',\n port=5000,\n access_log_class=AccessLogger,\n ssl_context=internal_server_ssl_context(),\n )\n" ]
[ [ "numpy.median", "pandas.DataFrame", "scipy.stats.mstats.gmean", "numpy.mean", "scipy.stats.mstats.hmean" ] ]
chaneyddtt/Coarse-to-fine-3D-Animal
[ "b3f9b1031b5761838c94ca091095636101747fd9" ]
[ "model/graph_hg.py" ]
[ "\"\"\"\nThis file contains the Definition of GraphCNN\nGraphCNN includes ResNet50 as a submodule\n\"\"\"\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\n\nfrom model.networks.graph_layers import GraphResBlock, GraphLinear\nfrom smal.mesh import Mesh\nfrom smal.smal_torch import SMAL\n\n# encoder-decoder structured GCN with skip connections\nclass GraphCNN_hg(nn.Module):\n\n def __init__(self, mesh, num_channels=256, local_feat=False, num_downsample=0):\n '''\n Args:\n mesh: mesh data that store the adjacency matrix\n num_channels: number of channels of GCN\n local_feat: whether use local feature for refinement\n num_downsample: number of downsampling of the input mesh\n '''\n super(GraphCNN_hg, self).__init__()\n self.A = mesh._A[num_downsample:] # get the correct adjacency matrix because the input might be downsampled\n self.num_layers = len(self.A) - 1\n print(\"Number of downsampling layer: {}\".format(self.num_layers))\n self.num_downsample = num_downsample\n if local_feat:\n self.lin1 = GraphLinear(3 + 2048 + 3840, 2 * num_channels)\n else:\n self.lin1 = GraphLinear(3 + 2048, 2 * num_channels)\n self.res1 = GraphResBlock(2 * num_channels, num_channels, self.A[0])\n encode_layers = []\n decode_layers = []\n\n for i in range(len(self.A)):\n encode_layers.append(GraphResBlock(num_channels, num_channels, self.A[i]))\n\n decode_layers.append(GraphResBlock((i+1)*num_channels, (i+1)*num_channels,\n self.A[len(self.A) - i - 1]))\n current_channels = (i+1)*num_channels\n # number of channels for the input is different because of the concatenation operation\n self.shape = nn.Sequential(GraphResBlock(current_channels, 64, self.A[0]),\n GraphResBlock(64, 32, self.A[0]),\n nn.GroupNorm(32 // 8, 32),\n nn.ReLU(inplace=True),\n GraphLinear(32, 3))\n\n self.encoder = nn.Sequential(*encode_layers)\n self.decoder = nn.Sequential(*decode_layers)\n self.mesh = mesh\n\n def forward(self, verts_c, img_fea_global, img_fea_multiscale=None, points_local=None):\n '''\n Args:\n verts_c: vertices from the coarse estimation\n img_fea_global: global feature for mesh refinement\n img_fea_multiscale: multi-scale feature from the encoder, used for local feature extraction\n points_local: 2D keypoint for local feature extraction\n Returns: refined mesh\n '''\n batch_size = img_fea_global.shape[0]\n ref_vertices = verts_c.transpose(1, 2)\n image_enc = img_fea_global.view(batch_size, 2048, 1).expand(-1, -1, ref_vertices.shape[-1])\n if points_local is not None:\n feat_local = torch.nn.functional.grid_sample(img_fea_multiscale, points_local)\n x = torch.cat([ref_vertices, image_enc, feat_local.squeeze(2)], dim=1)\n else:\n x = torch.cat([ref_vertices, image_enc], dim=1)\n x = self.lin1(x)\n x = self.res1(x)\n x_ = [x]\n for i in range(self.num_layers + 1):\n if i == self.num_layers:\n x = self.encoder[i](x)\n else:\n x = self.encoder[i](x)\n x = self.mesh.downsample(x.transpose(1, 2), n1=self.num_downsample+i, n2=self.num_downsample+i+1)\n x = x.transpose(1, 2)\n if i < self.num_layers-1:\n x_.append(x)\n for i in range(self.num_layers + 1):\n if i == self.num_layers:\n x = self.decoder[i](x)\n else:\n x = self.decoder[i](x)\n x = self.mesh.upsample(x.transpose(1, 2), n1=self.num_layers-i+self.num_downsample,\n n2=self.num_layers-i-1+self.num_downsample)\n x = x.transpose(1, 2)\n x = torch.cat([x, x_[self.num_layers-i-1]], dim=1) # skip connection between encoder and decoder\n\n shape = self.shape(x)\n return shape\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.functional.grid_sample", "torch.nn.GroupNorm", "torch.nn.ReLU" ] ]
yangg1224/AI-102-AIEngineer
[ "340202b4a37fd0c1e730322bc641c0606bf7e3c1" ]
[ "15-computer-vision/Python/image-analysis/image-analysis.py" ]
[ "from dotenv import load_dotenv\nimport os\nfrom array import array\nfrom PIL import Image, ImageDraw\nimport sys\nimport time\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# Import namespaces\n# import namespaces\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\ndef main():\n global cv_client\n\n try:\n # Get Configuration Settings\n load_dotenv()\n cog_endpoint = os.getenv('COG_SERVICE_ENDPOINT')\n cog_key = os.getenv('COG_SERVICE_KEY')\n\n # Get image\n image_file = 'images/street.jpg'\n if len(sys.argv) > 1:\n image_file = sys.argv[1]\n\n # Authenticate Computer Vision client\n # Authenticate Computer Vision client\n credential = CognitiveServicesCredentials(cog_key) \n cv_client = ComputerVisionClient(cog_endpoint, credential)\n\n # Analyze image\n AnalyzeImage(image_file)\n\n # Generate thumbnail\n GetThumbnail(image_file)\n\n except Exception as ex:\n print(ex)\n\ndef AnalyzeImage(image_file):\n print('Analyzing', image_file)\n\n # Specify features to be retrieved\n \n # Specify features to be retrieved\n features = [VisualFeatureTypes.description,\n VisualFeatureTypes.tags,\n VisualFeatureTypes.categories,\n VisualFeatureTypes.brands,\n VisualFeatureTypes.objects,\n VisualFeatureTypes.adult]\n # Get image analysis\n # Get image analysis\n with open(image_file, mode=\"rb\") as image_data:\n analysis = cv_client.analyze_image_in_stream(image_data , features)\n\n # Get image description\n for caption in analysis.description.captions:\n print(\"Description: '{}' (confidence: {:.2f}%)\".format(caption.text, caption.confidence * 100))\n\n # Get image tags\n\n # Get image tags\n if (len(analysis.tags) > 0):\n print(\"Tags: \")\n for tag in analysis.tags:\n print(\" -'{}' (confidence: {:.2f}%)\".format(tag.name, tag.confidence * 100))\n # Get image categories \n # Get image categories (including celebrities and landmarks)\n if (len(analysis.categories) > 0):\n print(\"Categories:\")\n landmarks = []\n celebrities = []\n for category in analysis.categories:\n # Print the category\n print(\" -'{}' (confidence: {:.2f}%)\".format(category.name, category.score * 100))\n if category.detail:\n # Get landmarks in this category\n if category.detail.landmarks:\n for landmark in category.detail.landmarks:\n if landmark not in landmarks:\n landmarks.append(landmark)\n\n # Get celebrities in this category\n if category.detail.celebrities:\n for celebrity in category.detail.celebrities:\n if celebrity not in celebrities:\n celebrities.append(celebrity)\n\n # If there were landmarks, list them\n if len(landmarks) > 0:\n print(\"Landmarks:\")\n for landmark in landmarks:\n print(\" -'{}' (confidence: {:.2f}%)\".format(landmark.name, landmark.confidence * 100))\n\n # If there were celebrities, list them\n if len(celebrities) > 0:\n print(\"Celebrities:\")\n for celebrity in celebrities:\n print(\" -'{}' (confidence: {:.2f}%)\".format(celebrity.name, celebrity.confidence * 100))\n\n # Get brands in the image\n # Get brands in the image\n if (len(analysis.brands) > 0):\n print(\"Brands: \")\n for brand in analysis.brands:\n print(\" -'{}' (confidence: {:.2f}%)\".format(brand.name, brand.confidence * 100))\n\n # Get objects in the image\n # Get moderation ratings\n ratings = 'Ratings:\\n -Adult: {}\\n -Racy: {}\\n -Gore: {}'.format(analysis.adult.is_adult_content,\n analysis.adult.is_racy_content,\n 
analysis.adult.is_gory_content)\n print(ratings)\n\n \n # Get objects in the image\n if len(analysis.objects) > 0:\n print(\"Objects in image:\")\n\n # Prepare image for drawing\n fig = plt.figure(figsize=(8, 8))\n plt.axis('off')\n image = Image.open(image_file)\n draw = ImageDraw.Draw(image)\n color = 'cyan'\n for detected_object in analysis.objects:\n # Print object name\n print(\" -{} (confidence: {:.2f}%)\".format(detected_object.object_property, detected_object.confidence * 100))\n\n # Draw object bounding box\n r = detected_object.rectangle\n bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))\n draw.rectangle(bounding_box, outline=color, width=3)\n plt.annotate(detected_object.object_property,(r.x, r.y), backgroundcolor=color)\n # Save annotated image\n plt.imshow(image)\n outputfile = 'objects.jpg'\n fig.savefig(outputfile)\n print(' Results saved in', outputfile)\n\ndef GetThumbnail(image_file):\n print('Generating thumbnail')\n\n # Generate a thumbnail\n # Generate a thumbnail\n with open(image_file, mode=\"rb\") as image_data:\n # Get thumbnail data\n thumbnail_stream = cv_client.generate_thumbnail_in_stream(100, 100, image_data, True)\n\n # Save thumbnail image\n thumbnail_file_name = 'thumbnail.png'\n with open(thumbnail_file_name, \"wb\") as thumbnail_file:\n for chunk in thumbnail_stream:\n thumbnail_file.write(chunk)\n\n print('Thumbnail saved in.', thumbnail_file_name)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.annotate", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
Veence/robosat.pink
[ "9d336c06df456ea11cd6c7532eff1dcc5adad883" ]
[ "robosat_pink/tools/tile.py" ]
[ "import os\nimport sys\nimport math\nimport argparse\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom PIL import Image\n\nimport mercantile\n\nfrom rasterio import open as rasterio_open\nfrom rasterio.vrt import WarpedVRT\nfrom rasterio.enums import Resampling\nfrom rasterio.warp import transform_bounds, calculate_default_transform\nfrom rasterio.transform import from_bounds\n\nfrom robosat_pink.config import load_config\nfrom robosat_pink.colors import make_palette\nfrom robosat_pink.web_ui import web_ui\n\n\ndef add_parser(subparser):\n parser = subparser.add_parser(\n \"tile\", help=\"tile a raster image or label\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\"--size\", type=int, default=512, help=\"size of tiles side in pixels\")\n parser.add_argument(\"--zoom\", type=int, required=True, help=\"zoom level of tiles\")\n parser.add_argument(\"--type\", type=str, choices=[\"image\", \"label\"], default=\"image\", help=\"image or label tiling\")\n parser.add_argument(\"--config\", type=str, required=True, help=\"path to configuration file\")\n parser.add_argument(\"--no_data\", type=int, help=\"color considered as no data [0-255]. Skip related tile\")\n parser.add_argument(\"--web_ui\", action=\"store_true\", help=\"activate web ui output\")\n parser.add_argument(\"--web_ui_base_url\", type=str, help=\"web ui alternate base url\")\n parser.add_argument(\"--web_ui_template\", type=str, help=\"path to an alternate web ui template\")\n parser.add_argument(\"raster\", type=str, help=\"path to the raster to tile\")\n parser.add_argument(\"out\", type=str, help=\"directory to write tiles\")\n\n parser.set_defaults(func=main)\n\n\ndef main(args):\n\n colors = [classe[\"color\"] for classe in load_config(args.config)[\"classes\"]]\n\n try:\n raster = rasterio_open(args.raster)\n w, s, e, n = bounds = transform_bounds(raster.crs, \"EPSG:4326\", *raster.bounds)\n transform, _, _ = calculate_default_transform(raster.crs, \"EPSG:3857\", raster.width, raster.height, *bounds)\n except:\n sys.exit(\"Error: Unable to load raster or deal with it's projection\")\n\n tiles = [mercantile.Tile(x=x, y=y, z=z) for x, y, z in mercantile.tiles(w, s, e, n, args.zoom)]\n tiles_nodata = []\n\n for tile in tqdm(tiles, desc=\"Tiling\", unit=\"tile\", ascii=True):\n\n w, s, e, n = tile_bounds = mercantile.xy_bounds(tile)\n\n # Inspired by Rio-Tiler, cf: https://github.com/mapbox/rio-tiler/pull/45\n warp_vrt = WarpedVRT(\n raster,\n crs=\"EPSG:3857\",\n resampling=Resampling.bilinear,\n add_alpha=False,\n transform=from_bounds(*tile_bounds, args.size, args.size),\n width=math.ceil((e - w) / transform.a),\n height=math.ceil((s - n) / transform.e),\n )\n data = warp_vrt.read(out_shape=(len(raster.indexes), args.size, args.size), window=warp_vrt.window(w, s, e, n))\n\n # If no_data is set, remove all tiles with at least one whole border filled only with no_data (on all bands)\n if type(args.no_data) is not None and (\n np.all(data[:, 0, :] == args.no_data)\n or np.all(data[:, -1, :] == args.no_data)\n or np.all(data[:, :, 0] == args.no_data)\n or np.all(data[:, :, -1] == args.no_data)\n ):\n tiles_nodata.append(tile)\n continue\n\n C, W, H = data.shape\n\n os.makedirs(os.path.join(args.out, str(args.zoom), str(tile.x)), exist_ok=True)\n path = os.path.join(args.out, str(args.zoom), str(tile.x), str(tile.y))\n\n if args.type == \"label\":\n assert C == 1, \"Error: Label raster input should be 1 band\"\n\n ext = \"png\"\n img = Image.fromarray(np.squeeze(data, axis=0), mode=\"P\")\n 
img.putpalette(make_palette(colors[0], colors[1]))\n img.save(\"{}.{}\".format(path, ext), optimize=True)\n\n elif args.type == \"image\":\n assert C == 1 or C == 3, \"Error: Image raster input should be either 1 or 3 bands\"\n\n # GeoTiff could be 16 or 32bits\n if data.dtype == \"uint16\":\n data = np.uint8(data / 256)\n elif data.dtype == \"uint32\":\n data = np.uint8(data / (256 * 256))\n\n if C == 1:\n ext = \"png\"\n Image.fromarray(np.squeeze(data, axis=0), mode=\"L\").save(\"{}.{}\".format(path, ext), optimize=True)\n elif C == 3:\n ext = \"webp\"\n Image.fromarray(np.moveaxis(data, 0, 2), mode=\"RGB\").save(\"{}.{}\".format(path, ext), optimize=True)\n\n if args.web_ui:\n template = \"leaflet.html\" if not args.web_ui_template else args.web_ui_template\n tiles = [tile for tile in tiles if tile not in tiles_nodata]\n base_url = args.web_ui_base_url if args.web_ui_base_url else \"./\"\n web_ui(args.out, base_url, tiles, tiles, ext, template)\n" ]
[ [ "numpy.all", "numpy.uint8", "numpy.squeeze", "numpy.moveaxis" ] ]
jcvasquezc/Disvoice
[ "ed9dbd42c3f01f041f90848f96004be8ebb78d8d" ]
[ "disvoice/phonological/phonological.py" ]
[ "\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Jun 24 2020\n\n@author: J. C. Vasquez-Correa\n\"\"\"\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom phonet.phonet import Phonet\nfrom phonet.phonet import Phonological as phon\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\n\nPATH = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(PATH, '..'))\nsys.path.append(PATH)\nfrom utils import save_dict_kaldimat, get_dict\n\nfrom script_mananger import script_manager\nimport torch\nfrom tqdm import tqdm\n\nclass Phonological:\n \"\"\"\n Compute phonological features from continuous speech files.\n\n 18 descriptors are computed, bases on 18 different phonological classes from the phonet toolkit \n https://phonet.readthedocs.io/en/latest/?badge=latest\n\n It computes the phonological log-likelihood ratio features from phonet\n\n Static or dynamic matrices can be computed:\n\n Static matrix is formed with 108 features formed with (18 descriptors) x (6 functionals: mean, std, skewness, kurtosis, max, min)\n\n Dynamic matrix is formed with the 18 descriptors computed for frames of 25 ms with a time-shift of 10 ms.\n\n\n Script is called as follows\n\n >>> python phonological.py <file_or_folder_audio> <file_features> <static (true or false)> <plots (true or false)> <format (csv, txt, npy, kaldi, torch)>\n\n Examples command line:\n\n >>> python phonological.py \"../audios/001_ddk1_PCGITA.wav\" \"phonologicalfeaturesAst.txt\" \"true\" \"true\" \"txt\"\n >>> python phonological.py \"../audios/001_ddk1_PCGITA.wav\" \"phonologicalfeaturesUst.csv\" \"true\" \"true\" \"csv\"\n >>> python phonological.py \"../audios/001_ddk1_PCGITA.wav\" \"phonologicalfeaturesUdyn.pt\" \"false\" \"true\" \"torch\"\n\n >>> python phonological.py \"../audios/\" \"phonologicalfeaturesst.txt\" \"true\" \"false\" \"txt\"\n >>> python phonological.py \"../audios/\" \"phonologicalfeaturesst.csv\" \"true\" \"false\" \"csv\"\n >>> python phonological.py \"../audios/\" \"phonologicalfeaturesdyn.pt\" \"false\" \"false\" \"torch\"\n >>> python phonological.py \"../audios/\" \"phonologicalfeaturesdyn.csv\" \"false\" \"false\" \"csv\"\n\n Examples directly in Python\n\n >>> phonological=Phonological()\n >>> file_audio=\"../audios/001_ddk1_PCGITA.wav\"\n >>> features1=phonological.extract_features_file(file_audio, static=True, plots=True, fmt=\"npy\")\n >>> features2=phonological.extract_features_file(file_audio, static=True, plots=True, fmt=\"dataframe\")\n >>> features3=phonological.extract_features_file(file_audio, static=False, plots=True, fmt=\"torch\")\n >>> phonological.extract_features_file(file_audio, static=False, plots=False, fmt=\"kaldi\", kaldi_file=\"./test\")\n\n \"\"\"\n\n def __init__(self):\n phonolist=phon()\n self.head_dyn=phonolist.get_list_phonological_keys()\n self.statistics=[\"mean\", \"std\", \"skewness\", \"kurtosis\", \"max\", \"min\"]\n self.head_st=[]\n for j in self.head_dyn:\n for l in self.statistics:\n self.head_st.append(j+\"_\"+l)\n self.phon=Phonet([\"all\"])\n\n def extract_features_file(self, audio, static=True, plots=False, fmt=\"npy\", kaldi_file=\"\"):\n \"\"\"Extract the phonological features from an audio file\n\n :param audio: .wav audio file.\n :param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames\n :param plots: timeshift to extract the features\n :param fmt: format to return the features (npy, 
dataframe, torch, kaldi)\n :param kaldi_file: file to store kaldi features, only valid when fmt==\"kaldi\"\n :returns: features computed from the audio file.\n\n >>> phonological=Phonological()\n >>> file_audio=\"../audios/001_ddk1_PCGITA.wav\"\n >>> features1=phonological.extract_features_file(file_audio, static=True, plots=True, fmt=\"npy\")\n >>> features2=phonological.extract_features_file(file_audio, static=True, plots=True, fmt=\"dataframe\")\n >>> features3=phonological.extract_features_file(file_audio, static=False, plots=True, fmt=\"torch\")\n >>> phonological.extract_features_file(file_audio, static=False, plots=False, fmt=\"kaldi\", kaldi_file=\"./test\")\n\n >>> phonological=Phonological()\n >>> path_audio=\"../audios/\"\n >>> features1=phonological.extract_features_path(path_audio, static=True, plots=False, fmt=\"npy\")\n >>> features2=phonological.extract_features_path(path_audio, static=True, plots=False, fmt=\"csv\")\n >>> features3=phonological.extract_features_path(path_audio, static=False, plots=True, fmt=\"torch\")\n >>> phonological.extract_features_path(path_audio, static=False, plots=False, fmt=\"kaldi\", kaldi_file=\"./test.ark\")\n\n \"\"\"\n if static and fmt==\"kaldi\":\n raise ValueError(\"Kaldi is only supported for dynamic features\")\n\n if audio.find('.wav') == -1 and audio.find('.WAV') == -1:\n raise ValueError(audio+\" is not a valid wav file\")\n \n df=self.phon.get_PLLR(audio, plot_flag=plots)\n\n keys=df.keys().tolist()\n keys.remove('time')\n\n if static:\n feat=[]\n functions=[np.mean, np.std, st.skew, st.kurtosis, np.max, np.min]\n for j in keys:\n for function in functions:\n feat.append(function(df[j]))\n feat=np.expand_dims(feat, axis=0)\n\n else:\n feat=np.stack([df[k] for k in keys], axis=1)\n\n if fmt in(\"npy\",\"txt\"):\n return feat\n elif fmt in(\"dataframe\",\"csv\") and static:\n dff = {}\n for e, k in enumerate(self.head_st):\n dff[k] = feat[:, e]\n return pd.DataFrame(df)\n elif fmt in(\"dataframe\",\"csv\") and not static:\n return df\n elif fmt==\"torch\":\n return torch.from_numpy(feat)\n elif fmt==\"kaldi\":\n featmat=np.stack([df[k] for k in keys], axis=1)\n name_all=audio.split('/')\n dictX={name_all[-1]:featmat}\n save_dict_kaldimat(dictX, kaldi_file)\n else:\n raise ValueError(fmt+\" is not supported\")\n\n\n\n def extract_features_path(self, path_audio, static=True, plots=False, fmt=\"npy\", kaldi_file=\"\"):\n \"\"\"Extract the phonological features for audios inside a path\n \n :param path_audio: directory with (.wav) audio files inside, sampled at 16 kHz\n :param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames\n :param plots: timeshift to extract the features\n :param fmt: format to return the features (npy, dataframe, torch, kaldi)\n :param kaldi_file: file to store kaldifeatures, only valid when fmt==\"kaldi\"\n :returns: features computed from the audio file.\n\n >>> phonological=Phonological()\n >>> path_audio=\"../audios/\"\n >>> features1=phonological.extract_features_path(path_audio, static=True, plots=False, fmt=\"npy\")\n >>> features2=phonological.extract_features_path(path_audio, static=True, plots=False, fmt=\"csv\")\n >>> features3=phonological.extract_features_path(path_audio, static=False, plots=True, fmt=\"torch\")\n >>> phonological.extract_features_path(path_audio, static=False, plots=False, fmt=\"kaldi\", kaldi_file=\"./test.ark\")\n \"\"\"\n\n hf=os.listdir(path_audio)\n hf.sort()\n\n pbar=tqdm(range(len(hf)))\n 
ids=[]\n\n Features=[]\n for j in pbar:\n pbar.set_description(\"Processing %s\" % hf[j])\n audio_file=path_audio+hf[j]\n feat=self.extract_features_file(audio_file, static=static, plots=plots, fmt=\"npy\")\n Features.append(feat)\n if static:\n ids.append(hf[j])\n else:\n ids.append(np.repeat(hf[j], feat.shape[0]))\n \n Features=np.vstack(Features)\n ids=np.hstack(ids)\n\n return self.save_features(Features, ids, fmt, static, kaldi_file)\n\n\n\n def save_features(self, Features, ids, fmt, static, kaldi_file):\n\n if static:\n head = self.head_st\n else:\n head = self.head_dyn\n \n if fmt in(\"npy\",\"txt\"):\n return Features\n elif fmt in(\"dataframe\",\"csv\"):\n df = {}\n for e, k in enumerate(head):\n df[k] = Features[:, e]\n df[\"id\"] = ids\n return pd.DataFrame(df)\n elif fmt==\"torch\":\n return torch.from_numpy(Features)\n elif fmt==\"kaldi\":\n dictX=get_dict(Features, ids)\n save_dict_kaldimat(dictX, kaldi_file)\n else:\n raise ValueError(fmt+\" is not supported\")\n\n\nif __name__==\"__main__\":\n\n if len(sys.argv)!=6:\n print(\"python phonological.py <file_or_folder_audio> <file_features> <static (true, false)> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>\")\n sys.exit()\n\n phonological=Phonological()\n script_manager(sys.argv, phonological)" ]
[ [ "numpy.hstack", "numpy.expand_dims", "torch.from_numpy", "numpy.stack", "pandas.DataFrame", "numpy.repeat", "numpy.vstack" ] ]
tomfisher/embeddedml
[ "37b67d3489c38936f1693f459e62a80b965cbfc4" ]
[ "convtrees/convtrees.py" ]
[ "import math\n\nimport mnist\nimport numpy\nimport pandas\nimport scipy.signal\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler\nfrom sklearn import pipeline\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.cluster import KMeans\n\nfrom spherecluster import SphericalKMeans\n\ndef plot_codebook(codebook):\n shape = codebook[0].shape\n\n n_rows = 5\n cols, rows = math.ceil(len(codebook)/n_rows), n_rows\n\n fig, axs = plt.subplots(rows, cols, figsize=(4*3,4))\n\n fig.patch.set_facecolor('grey')\n for ax in axs.flatten():\n ax.set_visible(False)\n\n for ax, kernel in zip(axs.flatten(), codebook):\n ax.set_visible(True)\n ax.imshow(kernel, cmap='gray')\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n\n fig.tight_layout()\n\n return fig\n\n\ndef random_kernels(N=20, K=3):\n r = numpy.random.random(N*K*K)\n k = r.reshape(-1, K, K)\n return k\n\ndef convolve(img, codebook, kernels, ls, xs, ys, K):\n \n features = numpy.zeros(shape=(len(kernels),))\n\n # TODO: vectorize over images\n for i, (l, k, x, y) in enumerate(zip(ls, kernels, xs, ys)):\n #print('ii', i//5, k, codebook.shape)\n kernel = codebook[l, k]\n #kernel = numpy.zeros(shape=(K,K))\n\n xmax = min(img.shape[0],x+K)\n ymax = min(img.shape[1],y+K)\n\n loc = img[x:xmax, y:ymax]\n conv = scipy.signal.convolve2d(loc, kernel, mode='full')\n features[i] = numpy.sum(conv)\n\n return numpy.stack(features) \n\ndef locations_random_full(shape, N, K=None):\n xs = numpy.random.randint(0, shape[0], N)\n ys = numpy.random.randint(0, shape[0], N)\n return xs, ys\n\ndef locations_random_valid(shape, N, K=3):\n xs = numpy.random.randint(0, shape[0]-K, N)\n ys = numpy.random.randint(0, shape[1]-K, N)\n return xs, ys\n\n\ndef sample_patches(imgs, locations, K, n_patches=100):\n N = len(imgs) * n_patches \n\n xs, ys = locations\n assert len(xs) == len(ys)\n out = numpy.zeros(shape=(len(xs), n_patches, K, K))\n\n img_range = numpy.array(range(0, len(imgs)))\n for l, (x, y) in enumerate(zip(xs, ys)):\n subset = numpy.random.choice(img_range, size=n_patches)\n patch = imgs[subset, x:x+K, y:y+K]\n out[l] = patch\n\n return out\n\ndef kmeans_codebook(patches, k=30):\n shape = patches[0].shape\n\n x = patches.reshape(-1, shape[0]*shape[1])\n # normalize\n #x = x / ( 1e-6 + x.sum(axis=1, keepdims=True) )\n\n est = SphericalKMeans(k)\n #est = KMeans(n_clusters=k)\n est.fit(x)\n\n codebook = est.cluster_centers_.reshape(-1, shape[0], shape[1])\n return codebook\n\n\n\ndef evaluate_mnist():\n\n train_x, train_y = mnist.train_images(), mnist.train_labels()\n test_x, test_y = mnist.test_images(), mnist.test_labels()\n\n K=3\n codebook_size = 30\n n_locations = 30\n n_kernels = 30\n input_shape = (28, 28)\n\n #random_codebook = random_kernels(K=K, N=codebook_size)\n #print('rr code', codebook.shape)\n print('Creating codebook')\n\n locations = locations_random_valid(input_shape, N=n_locations, K=K)\n assert len(locations[0]) == n_locations, (len(locations[0]), n_locations)\n \n loc_patches = sample_patches(train_x, locations, K=K, n_patches=1000)\n all_patches = loc_patches.reshape(-1, K, K)\n\n print('pp', loc_patches.shape, all_patches.shape)\n\n codebook = kmeans_codebook(all_patches, k=codebook_size)\n print('kmeans global codebook', codebook.shape)\n\n loc_codebooks = numpy.array([ kmeans_codebook(p, 
k=codebook_size) for p in loc_patches ])\n print('loc codebooks', loc_codebooks.shape)\n\n fig = plot_codebook(codebook)\n fig.savefig('codebook.png', facecolor=fig.get_facecolor())\n\n\n input_area = numpy.array(input_shape).prod()\n sample_area = K*K*n_locations\n coverage = sample_area / input_area\n print('cc', sample_area, input_area)\n # TODO: could calculate effective sampled area\n print('Coverage: {:d}% {:d}px/{:d}px'.format(int(coverage*100), sample_area, input_area))\n\n xs, ys = locations\n xs = numpy.repeat(xs, n_kernels)\n ys = numpy.repeat(ys, n_kernels)\n ls = list(range(n_locations))\n ls = numpy.repeat(ls, n_kernels)\n\n ks = numpy.random.randint(0, codebook_size, len(xs))\n\n assert len(ks) == len(xs), (len(ks), len(xs))\n assert len(ls) == len(xs), (len(ls), len(xs))\n\n# ks = numpy.flatten([ numpy.random.randint(0, len(cc), size=n_kernels) for cc in loc_codebooks ])\n\n\n def transform(imgs):\n f = [ convolve(i, loc_codebooks, ks, ls, xs, ys, K=K) for i in imgs ]\n f = numpy.array(f)\n return f\n\n print('Precomputing features')\n train_x = transform(train_x)\n test_x = transform(test_x)\n print('Feat', train_x.shape)\n\n clf = RandomForestClassifier(n_estimators=100, min_samples_leaf=1e-6)\n params = {\n 'n_estimators': [ 1, 10, 100],\n 'min_samples_leaf': [1e-5],\n #'min_samples_leaf': numpy.logspace(-7, -4, 10),\n #'n_estimators': numpy.linspace(30, 100, 4).astype(int),\n }\n\n #ff = FunctionTransformer(func=transform, validate=True)\n\n #clf = pipeline.make_pipeline(ff, clf)\n\n #print('p\\n', params)\n\n clf = GridSearchCV(clf, params, cv=3, scoring='accuracy', return_train_score=True,\n verbose=2, n_jobs=-1)\n\n print(train_x.shape, train_x.shape[0])\n\n #train_x = train_x.reshape(-1, 28*28)\n #test_x = test_x.reshape(-1, 28*28)\n #train_x = train_x[:1000]\n #train_y = train_y[:1000]\n\n print(\"Train model\")\n clf.fit(train_x, train_y)\n expected = test_y.tolist()\n\n print(\"Compute predictions\")\n predicted = clf.predict(test_x)\n print(\"Accuracy: \", accuracy_score(expected, predicted))\n\n df = pandas.DataFrame(clf.cv_results_)\n df.to_csv('results.csv')\n print('CV results\\n', df[['param_min_samples_leaf', 'param_n_estimators', 'mean_train_score', 'mean_test_score']])\n\n\nif __name__ == '__main__':\n\n mnist.temporary_dir = lambda: './data'\n evaluate_mnist()\n\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.random.random", "sklearn.ensemble.RandomForestClassifier", "numpy.random.choice", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.subplots", "numpy.stack", "pandas.DataFrame", "numpy.repeat", "numpy.array", "numpy.sum", "numpy.random.randint" ] ]
feihuidiqiu/HRNet-Facial-Landmark-Detection
[ "cc9d86a7b1270bec1aa25da0c81ef27f1b8b69a0" ]
[ "lib/utils/transforms.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Created by Tianheng Cheng([email protected]), Yang Zhao\n# ------------------------------------------------------------------------------\n\nimport cv2\nimport torch\nimport scipy\nimport scipy.misc\nimport numpy as np\n\n\nMATCHED_PARTS = {\n \"300W\": ([1, 17], [2, 16], [3, 15], [4, 14], [5, 13], [6, 12], [7, 11], [8, 10],\n [18, 27], [19, 26], [20, 25], [21, 24], [22, 23],\n [32, 36], [33, 35],\n [37, 46], [38, 45], [39, 44], [40, 43], [41, 48], [42, 47],\n [49, 55], [50, 54], [51, 53], [62, 64], [61, 65], [68, 66], [59, 57], [60, 56]),\n \"AFLW\": ([1, 6], [2, 5], [3, 4],\n [7, 12], [8, 11], [9, 10],\n [13, 15],\n [16, 18]),\n \"COFW\": ([1, 2], [5, 7], [3, 4], [6, 8], [9, 10], [11, 12], [13, 15], [17, 18], [14, 16], [19, 20], [23, 24]),\n \"WFLW\": ([0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27], [6, 26], [7, 25], [8, 24], [9, 23], [10, 22],\n [11, 21], [12, 20], [13, 19], [14, 18], [15, 17], # check\n [33, 46], [34, 45], [35, 44], [36, 43], [37, 42], [38, 50], [39, 49], [40, 48], [41, 47], # elbrow\n [60, 72], [61, 71], [62, 70], [63, 69], [64, 68], [65, 75], [66, 74], [67, 73],\n [55, 59], [56, 58],\n [76, 82], [77, 81], [78, 80], [87, 83], [86, 84],\n [88, 92], [89, 91], [95, 93], [96, 97])}\n\n\ndef fliplr_joints(x, width, dataset='aflw'):\n \"\"\"\n flip coords\n \"\"\"\n matched_parts = MATCHED_PARTS[dataset]\n # Flip horizontal\n x[:, 0] = width - x[:, 0]\n\n if dataset == 'WFLW':\n for pair in matched_parts:\n tmp = x[pair[0], :].copy()\n x[pair[0], :] = x[pair[1], :]\n x[pair[1], :] = tmp\n else:\n for pair in matched_parts:\n tmp = x[pair[0] - 1, :].copy()\n x[pair[0] - 1, :] = x[pair[1] - 1, :]\n x[pair[1] - 1, :] = tmp\n return x\n\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\n\ndef get_affine_transform(\n center, scale, rot, output_size,\n shift=np.array([0, 0], dtype=np.float32), inv=0):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n scale = np.array([scale, scale])\n\n scale_tmp = scale * 200.0\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n center=center.numpy()\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\n\ndef crop_v2(img, center, scale, output_size, rot=0):\n trans = get_affine_transform(center, scale, rot, output_size)\n\n dst_img = cv2.warpAffine(\n img, trans, (int(output_size[0]), int(output_size[1])),\n flags=cv2.INTER_LINEAR\n )\n\n return dst_img\n\n\ndef get_transform(center, scale, output_size, rot=0):\n \"\"\"\n 
General image processing functions\n \"\"\"\n # Generate transformation matrix\n h = 200 * scale\n t = np.zeros((3, 3))\n t[0, 0] = float(output_size[1]) / h\n t[1, 1] = float(output_size[0]) / h\n t[0, 2] = output_size[1] * (-float(center[0]) / h + .5)\n t[1, 2] = output_size[0] * (-float(center[1]) / h + .5)\n t[2, 2] = 1\n if not rot == 0:\n rot = -rot # To match direction of rotation from cropping\n rot_mat = np.zeros((3, 3))\n rot_rad = rot * np.pi / 180\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0, :2] = [cs, -sn]\n rot_mat[1, :2] = [sn, cs]\n rot_mat[2, 2] = 1\n # Need to rotate around center\n t_mat = np.eye(3)\n t_mat[0, 2] = -output_size[1]/2\n t_mat[1, 2] = -output_size[0]/2\n t_inv = t_mat.copy()\n t_inv[:2, 2] *= -1\n t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))\n return t\n\n\ndef transform_pixel(pt, center, scale, output_size, invert=0, rot=0):\n # Transform pixel location to different reference\n t = get_transform(center, scale, output_size, rot=rot)\n if invert:\n t = np.linalg.inv(t)\n new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2].astype(int) + 1\n\n\ndef transform_preds(coords, center, scale, output_size):\n\n for p in range(coords.size(0)):\n coords[p, 0:2] = torch.tensor(transform_pixel(coords[p, 0:2], center, scale, output_size, 1, 0))\n return coords\n\n\ndef crop(img, center, scale, output_size, rot=0):\n center_new = center.clone()\n\n # Preprocessing for efficient cropping\n ht, wd = img.shape[0], img.shape[1]\n sf = scale * 200.0 / output_size[0]\n if sf < 2:\n sf = 1\n else:\n new_size = int(np.math.floor(max(ht, wd) / sf))\n new_ht = int(np.math.floor(ht / sf))\n new_wd = int(np.math.floor(wd / sf))\n if new_size < 2:\n return torch.zeros(output_size[0], output_size[1], img.shape[2]) \\\n if len(img.shape) > 2 else torch.zeros(output_size[0], output_size[1])\n else:\n from PIL import Image\n# print([new_ht, new_wd])\n img=np.array(Image.fromarray(img.astype(np.uint8)).resize([ new_wd,new_ht]))\n# img = scipy.misc.imresize(img, [new_ht, new_wd]) # (0-1)-->(0-255)\n center_new[0] = center_new[0] * 1.0 / sf\n center_new[1] = center_new[1] * 1.0 / sf\n scale = scale / sf\n\n # Upper left point\n ul = np.array(transform_pixel([0, 0], center_new, scale, output_size, invert=1))\n # Bottom right point\n br = np.array(transform_pixel(output_size, center_new, scale, output_size, invert=1))\n\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n if not rot == 0:\n ul -= pad\n br += pad\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n\n new_img = np.zeros(new_shape, dtype=np.float32)\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]\n\n if not rot == 0:\n # Remove padding\n import skimage\n new_img =skimage.transform.rotate(new_img, rot)\n new_img = new_img[pad:-pad, pad:-pad]\n# print(output_size)\n# print(type(new_img))\n from PIL import Image\n \n new_img = np.array(Image.fromarray(new_img.astype(np.uint8)).resize(output_size))\n return new_img\n\n\ndef generate_target(img, pt, sigma, label_type='Gaussian'):\n # Check that any part of the gaussian 
is in-bounds\n tmp_size = sigma * 3\n ul = [int(pt[0] - tmp_size), int(pt[1] - tmp_size)]\n br = [int(pt[0] + tmp_size + 1), int(pt[1] + tmp_size + 1)]\n if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or\n br[0] < 0 or br[1] < 0):\n # If not, just return the image as is\n return img\n\n # Generate gaussian\n size = 2 * tmp_size + 1\n x = np.arange(0, size, 1, np.float32)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n # The gaussian is not normalized, we want the center value to equal 1\n if label_type == 'Gaussian':\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n else:\n g = sigma / (((x - x0) ** 2 + (y - y0) ** 2 + sigma ** 2) ** 1.5)\n\n # Usable gaussian range\n g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]\n g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]\n # Image range\n img_x = max(0, ul[0]), min(br[0], img.shape[1])\n img_y = max(0, ul[1]), min(br[1], img.shape[0])\n\n img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n return img\n" ]
[ [ "numpy.dot", "torch.zeros", "numpy.linalg.inv", "numpy.arange", "numpy.eye", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.float32", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.math.floor" ] ]
Galaxy-SynBioCAD/rpSelenzyme_image
[ "b6e1f3db18a89d9d84ff2f8285cb8d26e2a9db95" ]
[ "selenzy/tools/storefingerprints.py" ]
[ "\n\nfrom rdkit import Chem\nfrom rdkit.Chem.rdMolDescriptors import GetMorganFingerprint, GetAtomPairFingerprint, GetTopologicalTorsionFingerprint\nfrom rdkit.Chem.rdmolops import PatternFingerprint, RDKFingerprint\nfrom rdkit import DataStructs\nfrom os import path\nimport numpy as np\nimport csv, sys\n\n\ndef reactSMILES2FP(smi, smiles, fps, ffun, param):\n \"\"\" Return left / right reaction fingerprint \"\"\"\n \"\"\" The reaction could be replaced by rdkit \"\"\"\n left, right = smi.split('>>')\n rleft = left.split('.')\n rright = right.split('.')\n ok = True\n mleft = []\n mright = []\n for c in rleft:\n if c not in smiles:\n try:\n smiles[c] = Chem.MolFromSmiles(c)\n except:\n ok = False\n break\n mleft.append((c, smiles[c]))\n if not ok:\n return None\n for c in rright:\n if c not in smiles:\n try:\n smiles[c] = Chem.MolFromSmiles(c)\n except:\n ok = False\n break\n mright.append((c, smiles[c]))\n if not ok:\n return None\n rfp = [None, None]\n for c in mright:\n if c[0] not in fps:\n try:\n if param is not None:\n if ffun == GetMorganFingerprint:\n fps[c[0]] = ffun(c[1], radius=param)\n elif ffun == RDKFingerprint:\n fps[c[0]] = ffun(c[1], maxPath=param)\n else:\n fps[c[0]] = ffun(c[1])\n else:\n fps[c[0]] = ffun(c[1])\n except:\n ok = False\n break\n if rfp[1] is None:\n rfp[1] = fps[c[0]]\n else:\n rfp[1] = rfp[1] | fps[c[0]]\n if not ok or rfp[1] is None:\n return None\n for c in mleft:\n if c[0] not in fps:\n try:\n if param is not None:\n if ffun == GetMorganFingerprint:\n fps[c[0]] = ffun(c[1], radius=param)\n elif ffun == RDKFingerprint:\n fps[c[0]] = ffun(c[1], maxPath=param)\n else:\n fps[c[0]] = ffun(c[1])\n else:\n fps[c[0]] = ffun(c[1])\n except:\n ok = False\n break\n if rfp[0] is None:\n rfp[0] = fps[c[0]]\n else:\n rfp[0] = rfp[0] | fps[c[0]]\n if not ok or rfp[0] is None:\n return None\n return rfp\n\n\n\ndef reactionFingerprint(ffun, fname, param=None, bit=False):\n \"\"\" Reaction binary fingerprint based on prod-subs fingerprint logic difference \"\"\"\n \"\"\" rsmifile: precomputed reaction SMILES from METANETX2 \"\"\"\n csv.field_size_limit(sys.maxsize) # To avoid error with long csv fields\n rsmiFile = path.join('../data', 'reac_smi.csv')\n smiles = {}\n fps = {}\n rfp = {}\n with open(rsmiFile) as f:\n for row in csv.DictReader(f):\n rid = row['RID']\n smi = row['SMILES']\n fp = reactSMILES2FP(smi, smiles, fps, ffun, param)\n if fp is not None:\n rfp[rid] = fp\n\n fpNames = sorted(rfp)\n if bit:\n fp = [rfp[x].ToBitString() for x in fpNames]\n else:\n fp = [rfp[x] for x in fpNames]\n f = np.savez_compressed(fname, x=fp, y=fpNames)\n return rfp\n\n\ndef getReactants(dbfile):\n clist = set()\n for line in open(dbfile):\n if line.startswith('#'):\n continue\n row= line.rstrip().split('\\t')\n for x in row[1].split(' '):\n if x.startswith('MNXM'):\n clist.add(x)\n return clist\n\ndef getStructs(dbfile):\n structs = {}\n for l in open(dbfile):\n if l.startswith('#'):\n continue\n m = l.rstrip().split('\\t')\n cid = m[0]\n smiles = m[6]\n if len(smiles) > 0:\n structs[cid] = smiles\n return structs\n\ndef getMols():\n mol = {}\n clist = getReactants(path.join('../data', 'reac_prop.tsv'))\n cstr = getStructs(path.join('../data', 'chem_prop.tsv'))\n for c in set(cstr) & clist: \n try:\n mol[c] = Chem.MolFromSmiles(cstr[c])\n except:\n continue\n return mol\n\ndef storeFingerprint(mol, ffun, fname, param=None, bit=False):\n fp = []\n fpNames = []\n print('Computing fingerprints...')\n for c in mol:\n try:\n if param is not None:\n fp.append( ffun(mol[c], param) 
)\n else:\n if bit:\n fp.append( ffun(mol[c]).ToBitString() )\n else:\n fp.append( ffun(mol[c]) )\n except:\n continue\n fpNames.append(c)\n print('Saving...') \n f = np.savez_compressed(fname, x=fp, y=fpNames)\n\ndef testPattern(ptfile, bit=False):\n \"\"\" Test how to reload PatternFingerprint \"\"\"\n print('Validating fingerprint...')\n data = np.load(ptfile, allow_pickle=True)\n fps = data['x']\n fpNames = data['y']\n if bit:\n fp = [DataStructs.CreateFromBitString(z) for z in fps]\n else:\n fp = fps\n sim = DataStructs.BulkTanimotoSimilarity(fp[0], list(fp))\n return fp\n\n\nprint('Pattern fingerprint....')\nreactionFingerprint(PatternFingerprint, 'ptrfp.npz', bit=True)\nprint('RDK fingerprint....')\nfor radius in range(1,11):\n reactionFingerprint(RDKFingerprint, 'rdkrfp%d.npz' % (radius,), param=radius, bit=True)\nprint('Morgan fingerprint....')\nfor radius in range(1,11):\n reactionFingerprint(GetMorganFingerprint, 'mgrfp%d.npz' % (radius,), param=radius)\n\nsys.exit()\n\nmol = getMols()\nprint('Pattern fingerprint....')\nstoreFingerprint(mol, PatternFingerprint, 'ptfp.npz', bit=True)\nfp = testPattern('ptfp.npz', bit=True) \nprint('RDK fingerprint....')\nstoreFingerprint(mol, RDKFingerprint, 'rdkfp.npz', bit=True)\nfp = testPattern('rdkfp.npz', bit=True)\nprint('Atom pair fingerprint....') \nstoreFingerprint(mol, GetAtomPairFingerprint, 'apfp.npz')\nfp = testPattern('apfp.npz')\nprint('Topological torsion fingerprint....') \nstoreFingerprint(mol, GetTopologicalTorsionFingerprint, 'ttfp.npz')\nfp = testPattern('ttfp.npz')\nprint('Morgan fingerprint....') \nfor radius in range(1,11):\n storeFingerprint(mol, GetMorganFingerprint, 'mgfp%d.npz' % (radius,) , radius)\n fp = testPattern('mgfp%d.npz' % (radius,))\n" ]
[ [ "numpy.load", "numpy.savez_compressed" ] ]
jglaser/arrow
[ "c547f4d44997a73feb11ed85c195b22f557b42b7" ]
[ "python/pyarrow/tests/test_pandas.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport gc\nimport decimal\nimport json\nimport multiprocessing as mp\nimport sys\n\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time, timedelta\nfrom distutils.version import LooseVersion\n\nimport hypothesis as h\nimport hypothesis.extra.pytz as tzst\nimport hypothesis.strategies as st\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nimport pytz\n\nfrom pyarrow.pandas_compat import get_logical_type, _pandas_api\nfrom pyarrow.tests.util import random_ascii, rands\n\nimport pyarrow as pa\ntry:\n from pyarrow import parquet as pq\nexcept ImportError:\n pass\n\ntry:\n import pandas as pd\n import pandas.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pass\n\n\n# Marks all of the tests in this module\npytestmark = pytest.mark.pandas\n\n\ndef _alltypes_example(size=100):\n return pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms,\n # us, ns\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n\n\ndef _check_pandas_roundtrip(df, expected=None, use_threads=False,\n expected_schema=None,\n check_dtype=True, schema=None,\n preserve_index=False,\n as_batch=False):\n klass = pa.RecordBatch if as_batch else pa.Table\n table = klass.from_pandas(df, schema=schema,\n preserve_index=preserve_index,\n nthreads=2 if use_threads else 1)\n result = table.to_pandas(use_threads=use_threads)\n\n if expected_schema:\n # all occurrences of _check_pandas_roundtrip passes expected_schema\n # without the pandas generated key-value metadata\n assert table.schema.equals(expected_schema)\n\n if expected is None:\n expected = df\n\n tm.assert_frame_equal(result, expected, check_dtype=check_dtype,\n check_index_type=('equiv' if preserve_index\n else False))\n\n\ndef _check_series_roundtrip(s, type_=None, expected_pa_type=None):\n arr = pa.array(s, from_pandas=True, type=type_)\n\n if type_ is not None and expected_pa_type is None:\n expected_pa_type = type_\n\n if expected_pa_type is not None:\n assert arr.type == 
expected_pa_type\n\n result = pd.Series(arr.to_pandas(), name=s.name)\n tm.assert_series_equal(s, result)\n\n\ndef _check_array_roundtrip(values, expected=None, mask=None,\n type=None):\n arr = pa.array(values, from_pandas=True, mask=mask, type=type)\n result = arr.to_pandas()\n\n values_nulls = pd.isnull(values)\n if mask is None:\n assert arr.null_count == values_nulls.sum()\n else:\n assert arr.null_count == (mask | values_nulls).sum()\n\n if expected is None:\n if mask is None:\n expected = pd.Series(values)\n else:\n expected = pd.Series(np.ma.masked_array(values, mask=mask))\n\n tm.assert_series_equal(pd.Series(result), expected, check_names=False)\n\n\ndef _check_array_from_pandas_roundtrip(np_array, type=None):\n arr = pa.array(np_array, from_pandas=True, type=type)\n result = arr.to_pandas()\n npt.assert_array_equal(result, np_array)\n\n\nclass TestConvertMetadata:\n \"\"\"\n Conversion tests for Pandas metadata & indices.\n \"\"\"\n\n def test_non_string_columns(self):\n df = pd.DataFrame({0: [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.field(0).name == '0'\n\n def test_from_pandas_with_columns(self):\n df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]},\n columns=[1, 0])\n\n table = pa.Table.from_pandas(df, columns=[0, 1])\n expected = pa.Table.from_pandas(df[[0, 1]])\n assert expected.equals(table)\n\n record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])\n record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])\n assert record_batch_expected.equals(record_batch_table)\n\n def test_column_index_names_are_preserved(self):\n df = pd.DataFrame({'data': [1, 2, 3]})\n df.columns.names = ['a']\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_range_index_shortcut(self):\n # ARROW-1639\n index_name = 'foo'\n df = pd.DataFrame({'a': [1, 2, 3, 4]},\n index=pd.RangeIndex(0, 8, step=2, name=index_name))\n\n df2 = pd.DataFrame({'a': [4, 5, 6, 7]},\n index=pd.RangeIndex(0, 4))\n\n table = pa.Table.from_pandas(df)\n table_no_index_name = pa.Table.from_pandas(df2)\n\n # The RangeIndex is tracked in the metadata only\n assert len(table.schema) == 1\n\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n assert isinstance(result.index, pd.RangeIndex)\n assert _pandas_api.get_rangeindex_attribute(result.index, 'step') == 2\n assert result.index.name == index_name\n\n result2 = table_no_index_name.to_pandas()\n tm.assert_frame_equal(result2, df2)\n assert isinstance(result2.index, pd.RangeIndex)\n assert _pandas_api.get_rangeindex_attribute(result2.index, 'step') == 1\n assert result2.index.name is None\n\n def test_range_index_force_serialization(self):\n # ARROW-5427: preserve_index=True will force the RangeIndex to\n # be serialized as a column rather than tracked more\n # efficiently as metadata\n df = pd.DataFrame({'a': [1, 2, 3, 4]},\n index=pd.RangeIndex(0, 8, step=2, name='foo'))\n\n table = pa.Table.from_pandas(df, preserve_index=True)\n assert table.num_columns == 2\n assert 'foo' in table.column_names\n\n restored = table.to_pandas()\n tm.assert_frame_equal(restored, df)\n\n def test_rangeindex_doesnt_warn(self):\n # ARROW-5606: pandas 0.25 deprecated private _start/stop/step\n # attributes -> can be removed if support < pd 0.25 is dropped\n df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])\n\n with pytest.warns(None) as record:\n _check_pandas_roundtrip(df, preserve_index=True)\n\n assert len(record) == 0\n\n def test_multiindex_columns(self):\n columns = pd.MultiIndex.from_arrays([\n ['one', 'two'], 
['X', 'Y']\n ])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_with_dtypes(self):\n columns = pd.MultiIndex.from_arrays(\n [\n ['one', 'two'],\n pd.DatetimeIndex(['2017-08-01', '2017-08-02']),\n ],\n names=['level_1', 'level_2'],\n )\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_with_datetimes(self):\n # ARROW-3651. This bug occurred only when the dtype of the columns is\n # object. It does not occur for datetime64[ns]\n df = pd.DataFrame(1, index=pd.Index(list(range(5)), name='index'),\n columns=pd.Index([datetime(2018, 1, 1)], dtype='O'))\n assert df.columns.dtype == 'object'\n reconstructed = pa.table(df).to_pandas()\n\n # The reconstruction process results in object->datetime64[ns]\n df_expected = df.copy()\n df_expected.columns = df.columns.values\n assert df_expected.columns.dtype == 'datetime64[ns]'\n tm.assert_frame_equal(df_expected, reconstructed)\n\n def test_multiindex_columns_unicode(self):\n columns = pd.MultiIndex.from_arrays([['あ', 'い'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_doesnt_warn(self):\n # ARROW-3953: pandas 0.24 rename of MultiIndex labels to codes\n columns = pd.MultiIndex.from_arrays([['one', 'two'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n\n with pytest.warns(None) as record:\n _check_pandas_roundtrip(df, preserve_index=True)\n\n assert len(record) == 0\n\n def test_integer_index_column(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_index_metadata_field_name(self):\n # test None case, and strangely named non-index columns\n df = pd.DataFrame(\n [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],\n index=pd.MultiIndex.from_arrays(\n [['c', 'b', 'a'], [3, 2, 1]],\n names=[None, 'foo']\n ),\n columns=['a', None, '__index_level_0__'],\n )\n with pytest.warns(UserWarning):\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n col1, col2, col3, idx0, foo = js['columns']\n\n assert col1['name'] == 'a'\n assert col1['name'] == col1['field_name']\n\n assert col2['name'] is None\n assert col2['field_name'] == 'None'\n\n assert col3['name'] == '__index_level_0__'\n assert col3['name'] == col3['field_name']\n\n idx0_descr, foo_descr = js['index_columns']\n assert idx0_descr == '__index_level_0__'\n assert idx0['field_name'] == idx0_descr\n assert idx0['name'] is None\n\n assert foo_descr == 'foo'\n assert foo['field_name'] == foo_descr\n assert foo['name'] == foo_descr\n\n def test_categorical_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), dtype='category')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'categorical'\n assert column_indexes['numpy_type'] == 'int8'\n\n md = column_indexes['metadata']\n assert md['num_categories'] == 3\n assert md['ordered'] is False\n\n def test_string_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), name='stringz')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n 
js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] == 'stringz'\n assert column_indexes['name'] == column_indexes['field_name']\n assert column_indexes['numpy_type'] == 'object'\n assert column_indexes['pandas_type'] == 'unicode'\n\n md = column_indexes['metadata']\n\n assert len(md) == 1\n assert md['encoding'] == 'UTF-8'\n\n def test_datetimetz_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n js = t.schema.pandas_metadata\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'datetimetz'\n assert column_indexes['numpy_type'] == 'datetime64[ns]'\n\n md = column_indexes['metadata']\n assert md['timezone'] == 'America/New_York'\n\n def test_datetimetz_row_index(self):\n df = pd.DataFrame({\n 'a': pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n })\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_categorical_row_index(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n df['a'] = df.a.astype('category')\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_duplicate_column_names_does_not_crash(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df)\n\n def test_dictionary_indices_boundscheck(self):\n # ARROW-1658. No validation of indices leads to segfaults in pandas\n indices = [[0, 1], [0, -1]]\n\n for inds in indices:\n arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)\n batch = pa.RecordBatch.from_arrays([arr], ['foo'])\n table = pa.Table.from_batches([batch, batch, batch])\n\n with pytest.raises(IndexError):\n arr.to_pandas()\n\n with pytest.raises(IndexError):\n table.to_pandas()\n\n def test_unicode_with_unicode_column_and_index(self):\n df = pd.DataFrame({'あ': ['い']}, index=['う'])\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_mixed_column_names(self):\n # mixed type column names are not reconstructed exactly\n df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})\n\n for cols in [['あ', b'a'], [1, '2'], [1, 1.5]]:\n df.columns = pd.Index(cols, dtype=object)\n\n # assert that the from_pandas raises the warning\n with pytest.warns(UserWarning):\n pa.Table.from_pandas(df)\n\n expected = df.copy()\n expected.columns = df.columns.astype(str)\n with pytest.warns(UserWarning):\n _check_pandas_roundtrip(df, expected=expected,\n preserve_index=True)\n\n def test_binary_column_name(self):\n column_data = ['い']\n key = 'あ'.encode()\n data = {key: column_data}\n df = pd.DataFrame(data)\n\n # we can't use _check_pandas_roundtrip here because our metadata\n # is always decoded as utf8: even if binary goes in, utf8 comes out\n t = pa.Table.from_pandas(df, preserve_index=True)\n df2 = t.to_pandas()\n assert df.values[0] == df2.values[0]\n assert df.index.values[0] == df2.index.values[0]\n assert df.columns[0] == key\n\n def test_multiindex_duplicate_values(self):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n\n table = pa.Table.from_pandas(df)\n result_df = table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n 
def test_metadata_with_mixed_types(self):\n df = pd.DataFrame({'data': [b'some_bytes', 'some_unicode']})\n table = pa.Table.from_pandas(df)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'bytes'\n assert data_column['numpy_type'] == 'object'\n\n def test_ignore_metadata(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': ['foo', 'bar', 'baz']},\n index=['one', 'two', 'three'])\n table = pa.Table.from_pandas(df)\n\n result = table.to_pandas(ignore_metadata=True)\n expected = (table.cast(table.schema.remove_metadata())\n .to_pandas())\n\n tm.assert_frame_equal(result, expected)\n\n def test_list_metadata(self):\n df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})\n schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])\n table = pa.Table.from_pandas(df, schema=schema)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'list[int64]'\n assert data_column['numpy_type'] == 'object'\n\n def test_struct_metadata(self):\n df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n table = pa.Table.from_pandas(df)\n pandas_metadata = table.schema.pandas_metadata\n assert pandas_metadata['columns'][0]['pandas_type'] == 'object'\n\n def test_decimal_metadata(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n table = pa.Table.from_pandas(expected)\n js = table.schema.pandas_metadata\n assert 'mixed' not in js\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'decimal'\n assert data_column['numpy_type'] == 'object'\n assert data_column['metadata'] == {'precision': 26, 'scale': 11}\n\n def test_table_column_subset_metadata(self):\n # ARROW-1883\n # non-default index\n for index in [\n pd.Index(['a', 'b', 'c'], name='index'),\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')]:\n df = pd.DataFrame({'a': [1, 2, 3],\n 'b': [.1, .2, .3]}, index=index)\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n expected = df[['a']]\n if isinstance(df.index, pd.DatetimeIndex):\n df.index.freq = None\n tm.assert_frame_equal(result, expected)\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))\n\n def test_empty_list_metadata(self):\n # Create table with array of empty lists, forced to have type\n # list(string) in pyarrow\n c1 = [[\"test\"], [\"a\", \"b\"], None]\n c2 = [[], [], []]\n arrays = OrderedDict([\n ('c1', pa.array(c1, type=pa.list_(pa.string()))),\n ('c2', pa.array(c2, type=pa.list_(pa.string()))),\n ])\n rb = pa.RecordBatch.from_arrays(\n list(arrays.values()),\n list(arrays.keys())\n )\n tbl = pa.Table.from_batches([rb])\n\n # First roundtrip changes schema, because pandas cannot preserve the\n # type of empty lists\n df = tbl.to_pandas()\n tbl2 = pa.Table.from_pandas(df)\n md2 = tbl2.schema.pandas_metadata\n\n # Second roundtrip\n df2 = tbl2.to_pandas()\n expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))\n\n tm.assert_frame_equal(df2, expected)\n\n assert md2['columns'] == [\n {\n 'name': 'c1',\n 'field_name': 'c1',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[unicode]',\n },\n {\n 'name': 'c2',\n 'field_name': 'c2',\n 'metadata': None,\n 'numpy_type': 'object',\n 
'pandas_type': 'list[empty]',\n }\n ]\n\n def test_metadata_pandas_version(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.schema.pandas_metadata['pandas_version'] is not None\n\n\nclass TestConvertPrimitiveTypes:\n \"\"\"\n Conversion tests for primitive (e.g. numeric) types.\n \"\"\"\n\n def test_float_no_nulls(self):\n data = {}\n fields = []\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n num_values = 100\n\n for numpy_dtype, arrow_dtype in dtypes:\n values = np.random.randn(num_values)\n data[numpy_dtype] = values.astype(numpy_dtype)\n fields.append(pa.field(numpy_dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_float_nulls(self):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n names = ['f2', 'f4', 'f8']\n expected_cols = []\n\n arrays = []\n fields = []\n for name, arrow_dtype in dtypes:\n values = np.random.randn(num_values).astype(name)\n\n arr = pa.array(values, from_pandas=True, mask=null_mask)\n arrays.append(arr)\n fields.append(pa.field(name, arrow_dtype))\n values[null_mask] = np.nan\n\n expected_cols.append(values)\n\n ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),\n columns=names)\n\n table = pa.Table.from_arrays(arrays, names)\n assert table.schema.equals(pa.schema(fields))\n result = table.to_pandas()\n tm.assert_frame_equal(result, ex_frame)\n\n def test_float_nulls_to_ints(self):\n # ARROW-2135\n df = pd.DataFrame({\"a\": [1.0, 2.0, np.NaN]})\n schema = pa.schema([pa.field(\"a\", pa.int16(), nullable=True)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n assert table[0].to_pylist() == [1, 2, None]\n tm.assert_frame_equal(df, table.to_pandas())\n\n def test_float_nulls_to_boolean(self):\n s = pd.Series([0.0, 1.0, 2.0, None, -3.0])\n expected = pd.Series([False, True, True, None, True])\n _check_array_roundtrip(s, expected=expected, type=pa.bool_())\n\n def test_series_from_pandas_false_respected(self):\n # Check that explicit from_pandas=False is respected\n s = pd.Series([0.0, np.nan])\n arr = pa.array(s, from_pandas=False)\n assert arr.null_count == 0\n assert np.isnan(arr[1].as_py())\n\n def test_integer_no_nulls(self):\n data = OrderedDict()\n fields = []\n\n numpy_dtypes = [\n ('i1', pa.int8()), ('i2', pa.int16()),\n ('i4', pa.int32()), ('i8', pa.int64()),\n ('u1', pa.uint8()), ('u2', pa.uint16()),\n ('u4', pa.uint32()), ('u8', pa.uint64()),\n ('longlong', pa.int64()), ('ulonglong', pa.uint64())\n ]\n num_values = 100\n\n for dtype, arrow_dtype in numpy_dtypes:\n info = np.iinfo(dtype)\n values = np.random.randint(max(info.min, np.iinfo(np.int_).min),\n min(info.max, np.iinfo(np.int_).max),\n size=num_values)\n data[dtype] = values.astype(dtype)\n fields.append(pa.field(dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_integer_types(self):\n # Test all Numpy integer aliases\n data = OrderedDict()\n numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',\n 'int_', 'uint', 'longlong', 'ulonglong']\n for dtype in numpy_dtypes:\n data[dtype] = np.arange(12, dtype=dtype)\n df = pd.DataFrame(data)\n _check_pandas_roundtrip(df)\n\n # Do the same with pa.array()\n # (for some 
reason, it doesn't use the same code paths at all)\n for np_arr in data.values():\n arr = pa.array(np_arr)\n assert arr.to_pylist() == np_arr.tolist()\n\n def test_integer_byteorder(self):\n # Byteswapped arrays are not supported yet\n int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n for dt in int_dtypes:\n for order in '=<>':\n data = np.array([1, 2, 42], dtype=order + dt)\n for np_arr in (data, data[::2]):\n if data.dtype.isnative:\n arr = pa.array(data)\n assert arr.to_pylist() == data.tolist()\n else:\n with pytest.raises(NotImplementedError):\n arr = pa.array(data)\n\n def test_integer_with_nulls(self):\n # pandas requires upcast to float dtype\n\n int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n\n expected_cols = []\n arrays = []\n for name in int_dtypes:\n values = np.random.randint(0, 100, size=num_values)\n\n arr = pa.array(values, mask=null_mask)\n arrays.append(arr)\n\n expected = values.astype('f8')\n expected[null_mask] = np.nan\n\n expected_cols.append(expected)\n\n ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),\n columns=int_dtypes)\n\n table = pa.Table.from_arrays(arrays, int_dtypes)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_array_from_pandas_type_cast(self):\n arr = np.arange(10, dtype='int64')\n\n target_type = pa.int8()\n\n result = pa.array(arr, type=target_type)\n expected = pa.array(arr.astype('int8'))\n assert result.equals(expected)\n\n def test_boolean_no_nulls(self):\n num_values = 100\n\n np.random.seed(0)\n\n df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_boolean_nulls(self):\n # pandas requires upcast to object dtype\n num_values = 100\n np.random.seed(0)\n\n mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 10, size=num_values) < 5\n\n arr = pa.array(values, mask=mask)\n\n expected = values.astype(object)\n expected[mask] = None\n\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n ex_frame = pd.DataFrame({'bools': expected})\n\n table = pa.Table.from_arrays([arr], ['bools'])\n assert table.schema.equals(schema)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_boolean_to_int(self):\n # test from dtype=bool\n s = pd.Series([True, True, False, True, True] * 2)\n expected = pd.Series([1, 1, 0, 1, 1] * 2)\n _check_array_roundtrip(s, expected=expected, type=pa.int64())\n\n def test_boolean_objects_to_int(self):\n # test from dtype=object\n s = pd.Series([True, True, False, True, True] * 2, dtype=object)\n expected = pd.Series([1, 1, 0, 1, 1] * 2)\n expected_msg = 'Expected integer, got bool'\n with pytest.raises(pa.ArrowTypeError, match=expected_msg):\n _check_array_roundtrip(s, expected=expected, type=pa.int64())\n\n def test_boolean_nulls_to_float(self):\n # test from dtype=object\n s = pd.Series([True, True, False, None, True] * 2)\n expected = pd.Series([1.0, 1.0, 0.0, None, 1.0] * 2)\n _check_array_roundtrip(s, expected=expected, type=pa.float64())\n\n def test_boolean_multiple_columns(self):\n # ARROW-6325 (multiple columns resulting in strided conversion)\n df = pd.DataFrame(np.ones((3, 2), dtype='bool'), columns=['a', 'b'])\n _check_pandas_roundtrip(df)\n\n def test_float_object_nulls(self):\n arr = np.array([None, 1.5, np.float64(3.5)] * 5, 
dtype=object)\n df = pd.DataFrame({'floats': arr})\n expected = pd.DataFrame({'floats': pd.to_numeric(arr)})\n field = pa.field('floats', pa.float64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_float_with_null_as_integer(self):\n # ARROW-2298\n s = pd.Series([np.nan, 1., 2., np.nan])\n\n types = [pa.int8(), pa.int16(), pa.int32(), pa.int64(),\n pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]\n for ty in types:\n result = pa.array(s, type=ty)\n expected = pa.array([None, 1, 2, None], type=ty)\n assert result.equals(expected)\n\n df = pd.DataFrame({'has_nulls': s})\n schema = pa.schema([pa.field('has_nulls', ty)])\n result = pa.Table.from_pandas(df, schema=schema,\n preserve_index=False)\n assert result[0].chunk(0).equals(expected)\n\n def test_int_object_nulls(self):\n arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)\n df = pd.DataFrame({'ints': arr})\n expected = pd.DataFrame({'ints': pd.to_numeric(arr)})\n field = pa.field('ints', pa.int64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_boolean_object_nulls(self):\n arr = np.array([False, None, True] * 100, dtype=object)\n df = pd.DataFrame({'bools': arr})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_nulls_cast_numeric(self):\n arr = np.array([None], dtype=object)\n\n def _check_type(t):\n a2 = pa.array(arr, type=t)\n assert a2.type == t\n assert a2[0].as_py() is None\n\n _check_type(pa.int32())\n _check_type(pa.float64())\n\n def test_half_floats_from_numpy(self):\n arr = np.array([1.5, np.nan], dtype=np.float16)\n a = pa.array(arr, type=pa.float16())\n x, y = a.to_pylist()\n assert isinstance(x, np.float16)\n assert x == 1.5\n assert isinstance(y, np.float16)\n assert np.isnan(y)\n\n a = pa.array(arr, type=pa.float16(), from_pandas=True)\n x, y = a.to_pylist()\n assert isinstance(x, np.float16)\n assert x == 1.5\n assert y is None\n\n\[email protected]('dtype',\n ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_array_integer_object_nulls_option(dtype):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n array = pa.array(values, mask=null_mask)\n\n if null_mask.any():\n expected = values.astype('O')\n expected[null_mask] = None\n else:\n expected = values\n\n result = array.to_pandas(integer_object_nulls=True)\n\n np.testing.assert_equal(result, expected)\n\n\[email protected]('dtype',\n ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_table_integer_object_nulls_option(dtype):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n array = pa.array(values, mask=null_mask)\n\n if null_mask.any():\n expected = values.astype('O')\n expected[null_mask] = None\n else:\n expected = values\n\n expected = pd.DataFrame({dtype: expected})\n\n table = pa.Table.from_arrays([array], [dtype])\n result = table.to_pandas(integer_object_nulls=True)\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestConvertDateTimeLikeTypes:\n \"\"\"\n Conversion tests for datetime- and timestamp-like types (date64, etc.).\n \"\"\"\n\n def test_timestamps_notimezone_no_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n 
'2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_notimezone_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_with_timezone(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123',\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n })\n df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')\n _check_pandas_roundtrip(df)\n\n _check_series_roundtrip(df['datetime64'])\n\n # drop-in a null and ns instead of ms\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')\n\n _check_pandas_roundtrip(df)\n\n def test_python_datetime(self):\n # ARROW-2106\n date_array = [datetime.today() + timedelta(days=x) for x in range(10)]\n df = pd.DataFrame({\n 'datetime': pd.Series(date_array, dtype=object)\n })\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\n 'datetime': date_array\n })\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_datetime_with_pytz_tzinfo(self):\n for tz in [pytz.utc, pytz.timezone('US/Eastern'), pytz.FixedOffset(1)]:\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]\n df = pd.DataFrame({'datetime': values})\n _check_pandas_roundtrip(df)\n\n @h.given(st.none() | tzst.timezones())\n def test_python_datetime_with_pytz_timezone(self, tz):\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]\n df = pd.DataFrame({'datetime': values})\n _check_pandas_roundtrip(df)\n\n def test_python_datetime_with_timezone_tzinfo(self):\n from datetime import timezone\n\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=pytz.utc)]\n df = pd.DataFrame({'datetime': values})\n _check_pandas_roundtrip(df)\n\n # datetime.timezone is going to be pytz.FixedOffset\n hours = 1\n tz_timezone = timezone(timedelta(hours=hours))\n tz_pytz = pytz.FixedOffset(hours * 60)\n values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_timezone)]\n values_exp = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_pytz)]\n df = pd.DataFrame({'datetime': values})\n df_exp = pd.DataFrame({'datetime': values_exp})\n _check_pandas_roundtrip(df, expected=df_exp)\n\n def test_python_datetime_subclass(self):\n\n class MyDatetime(datetime):\n # see https://github.com/pandas-dev/pandas/issues/21142\n nanosecond = 0.0\n\n date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]\n df = pd.DataFrame({\"datetime\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\"datetime\": date_array})\n\n # https://github.com/pandas-dev/pandas/issues/21142\n expected_df[\"datetime\"] = pd.to_datetime(expected_df[\"datetime\"])\n\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_date_subclass(self):\n\n class 
MyDate(date):\n pass\n\n date_array = [MyDate(2000, 1, 1)]\n df = pd.DataFrame({\"date\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].chunk(0), pa.Date32Array)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame(\n {\"date\": np.array([date(2000, 1, 1)], dtype=object)}\n )\n tm.assert_frame_equal(expected_df, result)\n\n def test_datetime64_to_date32(self):\n # ARROW-1718\n arr = pa.array([date(2017, 10, 23), None])\n c = pa.chunked_array([arr])\n s = c.to_pandas()\n\n arr2 = pa.Array.from_pandas(s, type=pa.date32())\n\n assert arr2.equals(arr.cast('date32'))\n\n @pytest.mark.parametrize('mask', [\n None,\n np.array([True, False, False]),\n ])\n def test_pandas_datetime_to_date64(self, mask):\n s = pd.to_datetime([\n '2018-05-10T00:00:00',\n '2018-05-11T00:00:00',\n '2018-05-12T00:00:00',\n ])\n arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)\n\n data = np.array([\n date(2018, 5, 10),\n date(2018, 5, 11),\n date(2018, 5, 12)\n ])\n expected = pa.array(data, mask=mask, type=pa.date64())\n\n assert arr.equals(expected)\n\n @pytest.mark.parametrize('mask', [\n None,\n np.array([True, False, False])\n ])\n def test_pandas_datetime_to_date64_failures(self, mask):\n s = pd.to_datetime([\n '2018-05-10T10:24:01',\n '2018-05-11T10:24:01',\n '2018-05-12T10:24:01',\n ])\n\n expected_msg = 'Timestamp value had non-zero intraday milliseconds'\n with pytest.raises(pa.ArrowInvalid, match=expected_msg):\n pa.Array.from_pandas(s, type=pa.date64(), mask=mask)\n\n def test_array_types_date_as_object(self):\n data = [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]\n expected_d = np.array(['2000-01-01', None, '1970-01-01',\n '2040-02-26'], dtype='datetime64[D]')\n\n expected_ns = np.array(['2000-01-01', None, '1970-01-01',\n '2040-02-26'], dtype='datetime64[ns]')\n\n objects = [pa.array(data),\n pa.chunked_array([data])]\n\n for obj in objects:\n result = obj.to_pandas()\n expected_obj = expected_d.astype(object)\n assert result.dtype == expected_obj.dtype\n npt.assert_array_equal(result, expected_obj)\n\n result = obj.to_pandas(date_as_object=False)\n assert result.dtype == expected_ns.dtype\n npt.assert_array_equal(result, expected_ns)\n\n def test_table_convert_date_as_object(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n df_datetime = table.to_pandas(date_as_object=False)\n df_object = table.to_pandas()\n\n tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,\n check_dtype=True)\n tm.assert_frame_equal(df, df_object, check_dtype=True)\n\n def test_date_infer(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n table = pa.Table.from_pandas(df, preserve_index=False)\n field = pa.field('date', pa.date32())\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_date_mask(self):\n arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],\n dtype='datetime64[D]')\n mask = [True, False]\n result = pa.array(arr, mask=np.array(mask))\n expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')\n expected = pa.array(expected, from_pandas=True)\n assert expected.equals(result)\n\n def test_date_objects_typed(self):\n arr = 
np.array([\n date(2017, 4, 3),\n None,\n date(2017, 4, 4),\n date(2017, 4, 5)], dtype=object)\n\n arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')\n arr_i8 = arr_i4.astype('int64') * 86400000\n mask = np.array([False, True, False, False])\n\n t32 = pa.date32()\n t64 = pa.date64()\n\n a32 = pa.array(arr, type=t32)\n a64 = pa.array(arr, type=t64)\n\n a32_expected = pa.array(arr_i4, mask=mask, type=t32)\n a64_expected = pa.array(arr_i8, mask=mask, type=t64)\n\n assert a32.equals(a32_expected)\n assert a64.equals(a64_expected)\n\n # Test converting back to pandas\n colnames = ['date32', 'date64']\n table = pa.Table.from_arrays([a32, a64], colnames)\n\n ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',\n '2017-04-05'],\n dtype='datetime64[D]'))\n ex_values[1] = pd.NaT.value\n\n ex_datetime64ns = ex_values.astype('datetime64[ns]')\n expected_pandas = pd.DataFrame({'date32': ex_datetime64ns,\n 'date64': ex_datetime64ns},\n columns=colnames)\n table_pandas = table.to_pandas(date_as_object=False)\n tm.assert_frame_equal(table_pandas, expected_pandas)\n\n table_pandas_objects = table.to_pandas()\n ex_objects = ex_values.astype('object')\n expected_pandas_objects = pd.DataFrame({'date32': ex_objects,\n 'date64': ex_objects},\n columns=colnames)\n tm.assert_frame_equal(table_pandas_objects,\n expected_pandas_objects)\n\n def test_object_null_values(self):\n # ARROW-842\n NA = getattr(pd, 'NA', None)\n values = np.array([datetime(2000, 1, 1), pd.NaT, NA], dtype=object)\n values_with_none = np.array([datetime(2000, 1, 1), None, None],\n dtype=object)\n result = pa.array(values, from_pandas=True)\n expected = pa.array(values_with_none, from_pandas=True)\n assert result.equals(expected)\n assert result.null_count == 2\n\n def test_dates_from_integers(self):\n t1 = pa.date32()\n t2 = pa.date64()\n\n arr = np.array([17259, 17260, 17261], dtype='int32')\n arr2 = arr.astype('int64') * 86400000\n\n a1 = pa.array(arr, type=t1)\n a2 = pa.array(arr2, type=t2)\n\n expected = date(2017, 4, 3)\n assert a1[0].as_py() == expected\n assert a2[0].as_py() == expected\n\n def test_pytime_from_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356)]\n\n # microseconds\n t1 = pa.time64('us')\n\n aobjs = np.array(pytimes + [None], dtype=object)\n parr = pa.array(aobjs)\n assert parr.type == t1\n assert parr[0].as_py() == pytimes[0]\n assert parr[1].as_py() == pytimes[1]\n assert parr[2].as_py() is None\n\n # DataFrame\n df = pd.DataFrame({'times': aobjs})\n batch = pa.RecordBatch.from_pandas(df)\n assert batch[0].equals(parr)\n\n # Test ndarray of int64 values\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n a1 = pa.array(arr, type=pa.time64('us'))\n assert a1[0].as_py() == pytimes[0]\n\n a2 = pa.array(arr * 1000, type=pa.time64('ns'))\n assert a2[0].as_py() == pytimes[0]\n\n a3 = pa.array((arr / 1000).astype('i4'),\n type=pa.time32('ms'))\n assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)\n\n a4 = pa.array((arr / 1000000).astype('i4'),\n type=pa.time32('s'))\n assert a4[0].as_py() == pytimes[0].replace(microsecond=0)\n\n def test_arrow_time_to_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356),\n time(0, 0, 0)]\n\n expected = np.array(pytimes[:2] + [None])\n expected_ms = np.array([x.replace(microsecond=1000)\n for x in pytimes[:2]] +\n [None])\n expected_s = np.array([x.replace(microsecond=0)\n for x in pytimes[:2]] +\n [None])\n\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n arr = 
np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n null_mask = np.array([False, False, True], dtype=bool)\n\n a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))\n a2 = pa.array(arr * 1000, mask=null_mask,\n type=pa.time64('ns'))\n\n a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,\n type=pa.time32('ms'))\n a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,\n type=pa.time32('s'))\n\n names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']\n batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)\n\n for arr, expected_values in [(a1, expected),\n (a2, expected),\n (a3, expected_ms),\n (a4, expected_s)]:\n result_pandas = arr.to_pandas()\n assert (result_pandas.values == expected_values).all()\n\n df = batch.to_pandas()\n expected_df = pd.DataFrame({'time64[us]': expected,\n 'time64[ns]': expected,\n 'time32[ms]': expected_ms,\n 'time32[s]': expected_s},\n columns=names)\n\n tm.assert_frame_equal(df, expected_df)\n\n def test_numpy_datetime64_columns(self):\n datetime64_ns = np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n _check_array_from_pandas_roundtrip(datetime64_ns)\n\n datetime64_us = np.array([\n '2007-07-13T01:23:34.123456',\n None,\n '2006-01-13T12:34:56.432539',\n '2010-08-13T05:46:57.437699'],\n dtype='datetime64[us]')\n _check_array_from_pandas_roundtrip(datetime64_us)\n\n datetime64_ms = np.array([\n '2007-07-13T01:23:34.123',\n None,\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n _check_array_from_pandas_roundtrip(datetime64_ms)\n\n datetime64_s = np.array([\n '2007-07-13T01:23:34',\n None,\n '2006-01-13T12:34:56',\n '2010-08-13T05:46:57'],\n dtype='datetime64[s]')\n _check_array_from_pandas_roundtrip(datetime64_s)\n\n def test_timestamp_to_pandas_ns(self):\n # non-ns timestamp gets cast to ns on conversion to pandas\n arr = pa.array([1, 2, 3], pa.timestamp('ms'))\n expected = pd.Series(pd.to_datetime([1, 2, 3], unit='ms'))\n s = arr.to_pandas()\n tm.assert_series_equal(s, expected)\n arr = pa.chunked_array([arr])\n s = arr.to_pandas()\n tm.assert_series_equal(s, expected)\n\n def test_timestamp_to_pandas_out_of_bounds(self):\n # ARROW-7758 check for out of bounds timestamps for non-ns timestamps\n\n for unit in ['s', 'ms', 'us']:\n for tz in [None, 'America/New_York']:\n arr = pa.array([datetime(1, 1, 1)], pa.timestamp(unit, tz=tz))\n table = pa.table({'a': arr})\n\n msg = \"would result in out of bounds timestamp\"\n with pytest.raises(ValueError, match=msg):\n arr.to_pandas()\n\n with pytest.raises(ValueError, match=msg):\n table.to_pandas()\n\n with pytest.raises(ValueError, match=msg):\n # chunked array\n table.column('a').to_pandas()\n\n # just ensure those don't give an error, but do not\n # check actual garbage output\n arr.to_pandas(safe=False)\n table.to_pandas(safe=False)\n table.column('a').to_pandas(safe=False)\n\n def test_timestamp_to_pandas_empty_chunked(self):\n # ARROW-7907 table with chunked array with 0 chunks\n table = pa.table({'a': pa.chunked_array([], type=pa.timestamp('us'))})\n result = table.to_pandas()\n expected = pd.DataFrame({'a': pd.Series([], dtype=\"datetime64[ns]\")})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])\n def test_numpy_datetime64_day_unit(self, dtype):\n datetime64_d = np.array([\n '2007-07-13',\n None,\n '2006-01-15',\n '2010-08-19'],\n dtype='datetime64[D]')\n 
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)\n\n def test_array_from_pandas_date_with_mask(self):\n m = np.array([True, False, True])\n data = pd.Series([\n date(1990, 1, 1),\n date(1991, 1, 1),\n date(1992, 1, 1)\n ])\n\n result = pa.Array.from_pandas(data, mask=m)\n\n expected = pd.Series([None, date(1991, 1, 1), None])\n assert pa.Array.from_pandas(expected).equals(result)\n\n def test_fixed_offset_timezone(self):\n df = pd.DataFrame({\n 'a': [\n pd.Timestamp('2012-11-11 00:00:00+01:00'),\n pd.NaT\n ]\n })\n _check_pandas_roundtrip(df)\n _check_serialize_components_roundtrip(df)\n\n def test_timedeltas_no_nulls(self):\n df = pd.DataFrame({\n 'timedelta64': np.array([0, 3600000000000, 7200000000000],\n dtype='timedelta64[ns]')\n })\n field = pa.field('timedelta64', pa.duration('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timedeltas_nulls(self):\n df = pd.DataFrame({\n 'timedelta64': np.array([0, None, 7200000000000],\n dtype='timedelta64[ns]')\n })\n field = pa.field('timedelta64', pa.duration('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n\n# ----------------------------------------------------------------------\n# Conversion tests for string and binary types.\n\n\nclass TestConvertStringLikeTypes:\n\n def test_pandas_unicode(self):\n repeats = 1000\n values = ['foo', None, 'bar', 'mañana', np.nan]\n df = pd.DataFrame({'strings': values * repeats})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_bytes_to_binary(self):\n values = ['qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]\n df = pd.DataFrame({'strings': values})\n\n table = pa.Table.from_pandas(df)\n assert table[0].type == pa.binary()\n\n values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]\n expected = pd.DataFrame({'strings': values2})\n _check_pandas_roundtrip(df, expected)\n\n @pytest.mark.large_memory\n def test_bytes_exceed_2gb(self):\n v1 = b'x' * 100000000\n v2 = b'x' * 147483646\n\n # ARROW-2227, hit exactly 2GB on the nose\n df = pd.DataFrame({\n 'strings': [v1] * 20 + [v2] + ['x'] * 20\n })\n arr = pa.array(df['strings'])\n assert isinstance(arr, pa.ChunkedArray)\n assert arr.num_chunks == 2\n arr = None\n\n table = pa.Table.from_pandas(df)\n assert table[0].num_chunks == 2\n\n def test_fixed_size_bytes(self):\n values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema[0].type == schema[0].type\n assert table.schema[0].name == schema[0].name\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_fixed_size_bytes_does_not_accept_varying_lengths(self):\n values = [b'foo', None, b'ba', None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n with pytest.raises(pa.ArrowInvalid):\n pa.Table.from_pandas(df, schema=schema)\n\n def test_variable_size_bytes(self):\n s = pd.Series([b'123', b'', b'a', None])\n _check_series_roundtrip(s, type_=pa.binary())\n\n def test_binary_from_bytearray(self):\n s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),\n None])\n # Explicitly set type\n _check_series_roundtrip(s, type_=pa.binary())\n # Infer type from bytearrays\n _check_series_roundtrip(s, 
expected_pa_type=pa.binary())\n\n def test_large_binary(self):\n s = pd.Series([b'123', b'', b'a', None])\n _check_series_roundtrip(s, type_=pa.large_binary())\n df = pd.DataFrame({'a': s})\n _check_pandas_roundtrip(\n df, schema=pa.schema([('a', pa.large_binary())]))\n\n def test_large_string(self):\n s = pd.Series(['123', '', 'a', None])\n _check_series_roundtrip(s, type_=pa.large_string())\n df = pd.DataFrame({'a': s})\n _check_pandas_roundtrip(\n df, schema=pa.schema([('a', pa.large_string())]))\n\n def test_table_empty_str(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result1 = table.to_pandas(strings_to_categorical=False)\n expected1 = pd.DataFrame({'strings': values})\n tm.assert_frame_equal(result1, expected1, check_dtype=True)\n\n result2 = table.to_pandas(strings_to_categorical=True)\n expected2 = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result2, expected2, check_dtype=True)\n\n def test_selective_categoricals(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n expected_str = pd.DataFrame({'strings': values})\n expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})\n\n result1 = table.to_pandas(categories=['strings'])\n tm.assert_frame_equal(result1, expected_cat, check_dtype=True)\n result2 = table.to_pandas(categories=[])\n tm.assert_frame_equal(result2, expected_str, check_dtype=True)\n result3 = table.to_pandas(categories=('strings',))\n tm.assert_frame_equal(result3, expected_cat, check_dtype=True)\n result4 = table.to_pandas(categories=tuple())\n tm.assert_frame_equal(result4, expected_str, check_dtype=True)\n\n def test_to_pandas_categorical_zero_length(self):\n # ARROW-3586\n array = pa.array([], type=pa.int32())\n table = pa.Table.from_arrays(arrays=[array], names=['col'])\n # This would segfault under 0.11.0\n table.to_pandas(categories=['col'])\n\n def test_to_pandas_categories_already_dictionary(self):\n # Showed up in ARROW-6434, ARROW-6435\n array = pa.array(['foo', 'foo', 'foo', 'bar']).dictionary_encode()\n table = pa.Table.from_arrays(arrays=[array], names=['col'])\n result = table.to_pandas(categories=['col'])\n assert table.to_pandas().equals(result)\n\n def test_table_str_to_categorical_without_na(self):\n values = ['a', 'a', 'b', 'b', 'c']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n def test_table_str_to_categorical_with_na(self):\n values = [None, 'a', 'b', np.nan]\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n 
zero_copy_only=True)\n\n # Regression test for ARROW-2101\n def test_array_of_bytes_to_strings(self):\n converted = pa.array(np.array([b'x'], dtype=object), pa.string())\n assert converted.type == pa.string()\n\n # Make sure that if an ndarray of bytes is passed to the array\n # constructor and the type is string, it will fail if those bytes\n # cannot be converted to utf-8\n def test_array_of_bytes_to_strings_bad_data(self):\n with pytest.raises(\n pa.lib.ArrowInvalid,\n match=\"was not a utf8 string\"):\n pa.array(np.array([b'\\x80\\x81'], dtype=object), pa.string())\n\n def test_numpy_string_array_to_fixed_size_binary(self):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n\n converted = pa.array(arr, type=pa.binary(3))\n expected = pa.array(list(arr), type=pa.binary(3))\n assert converted.equals(expected)\n\n mask = np.array([True, False, True])\n converted = pa.array(arr, type=pa.binary(3), mask=mask)\n expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))\n assert converted.equals(expected)\n\n with pytest.raises(pa.lib.ArrowInvalid,\n match=r'Got bytestring of length 3 \\(expected 4\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n pa.array(arr, type=pa.binary(4))\n\n with pytest.raises(\n pa.lib.ArrowInvalid,\n match=r'Got bytestring of length 12 \\(expected 3\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')\n pa.array(arr, type=pa.binary(3))\n\n\nclass TestConvertDecimalTypes:\n \"\"\"\n Conversion test for decimal types.\n \"\"\"\n decimal32 = [\n decimal.Decimal('-1234.123'),\n decimal.Decimal('1234.439')\n ]\n decimal64 = [\n decimal.Decimal('-129934.123331'),\n decimal.Decimal('129534.123731')\n ]\n decimal128 = [\n decimal.Decimal('394092382910493.12341234678'),\n decimal.Decimal('-314292388910493.12343437128')\n ]\n\n @pytest.mark.parametrize(('values', 'expected_type'), [\n pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),\n pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),\n pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')\n ])\n def test_decimal_from_pandas(self, values, expected_type):\n expected = pd.DataFrame({'decimals': values})\n table = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', expected_type)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n @pytest.mark.parametrize('values', [\n pytest.param(decimal32, id='decimal32'),\n pytest.param(decimal64, id='decimal64'),\n pytest.param(decimal128, id='decimal128')\n ])\n def test_decimal_to_pandas(self, values):\n expected = pd.DataFrame({'decimals': values})\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n def test_decimal_fails_with_truncation(self):\n data1 = [decimal.Decimal('1.234')]\n type1 = pa.decimal128(10, 2)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data1, type=type1)\n\n data2 = [decimal.Decimal('1.2345')]\n type2 = pa.decimal128(10, 3)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data2, type=type2)\n\n def test_decimal_with_different_precisions(self):\n data = [\n decimal.Decimal('0.01'),\n decimal.Decimal('0.001'),\n ]\n series = pd.Series(data)\n array = pa.array(series)\n assert array.to_pylist() == data\n assert array.type == pa.decimal128(3, 3)\n\n array = pa.array(data, type=pa.decimal128(12, 5))\n expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]\n 
assert array.to_pylist() == expected\n\n def test_decimal_with_None_explicit_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n # Test that having all None values still produces decimal array\n series = pd.Series([None] * 2)\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n def test_decimal_with_None_infer_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))\n\n def test_strided_objects(self, tmpdir):\n # see ARROW-3053\n data = {\n 'a': {0: 'a'},\n 'b': {0: decimal.Decimal('0.0')}\n }\n\n # This yields strided objects\n df = pd.DataFrame.from_dict(data)\n _check_pandas_roundtrip(df)\n\n\nclass TestConvertListTypes:\n \"\"\"\n Conversion tests for list<> types.\n \"\"\"\n\n def test_column_of_arrays(self):\n df, schema = dataframe_with_arrays()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.with_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n field = schema.field(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_arrays_to_py(self):\n # Test regression in ARROW-1199 not caught in above test\n dtype = 'i1'\n arr = np.array([\n np.arange(10, dtype=dtype),\n np.arange(5, dtype=dtype),\n None,\n np.arange(1, dtype=dtype)\n ])\n type_ = pa.list_(pa.int8())\n parr = pa.array(arr, type=type_)\n\n assert parr[0].as_py() == list(range(10))\n assert parr[1].as_py() == list(range(5))\n assert parr[2].as_py() is None\n assert parr[3].as_py() == [0]\n\n def test_column_of_boolean_list(self):\n # ARROW-4370: Table to pandas conversion fails for list of bool\n array = pa.array([[True, False], [True]], type=pa.list_(pa.bool_()))\n table = pa.Table.from_arrays([array], names=['col1'])\n df = table.to_pandas()\n\n expected_df = pd.DataFrame({'col1': [[True, False], [True]]})\n tm.assert_frame_equal(df, expected_df)\n\n s = table[0].to_pandas()\n tm.assert_series_equal(pd.Series(s), df['col1'], check_names=False)\n\n def test_column_of_decimal_list(self):\n array = pa.array([[decimal.Decimal('1'), decimal.Decimal('2')],\n [decimal.Decimal('3.3')]],\n type=pa.list_(pa.decimal128(2, 1)))\n table = pa.Table.from_arrays([array], names=['col1'])\n df = table.to_pandas()\n\n expected_df = pd.DataFrame(\n {'col1': [[decimal.Decimal('1'), decimal.Decimal('2')],\n [decimal.Decimal('3.3')]]})\n tm.assert_frame_equal(df, expected_df)\n\n def test_nested_types_from_ndarray_null_entries(self):\n # Root cause of ARROW-6435\n s = pd.Series(np.array([np.nan, np.nan], dtype=object))\n\n for ty in [pa.list_(pa.int64()),\n pa.large_list(pa.int64()),\n pa.struct([pa.field('f0', 'int32')])]:\n result = pa.array(s, type=ty)\n expected = pa.array([None, None], type=ty)\n assert result.equals(expected)\n\n with pytest.raises(TypeError):\n pa.array(s.values, type=ty)\n\n def test_column_of_lists(self):\n df, schema = dataframe_with_lists()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.with_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n 
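# Each column should also round-trip individually against its schema field type\n            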
field = schema.field(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_lists_first_empty(self):\n # ARROW-2124\n num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]\n series = pd.Series([np.array(s, dtype=float) for s in num_lists])\n arr = pa.array(series)\n result = pd.Series(arr.to_pandas())\n tm.assert_series_equal(result, series)\n\n def test_column_of_lists_chunked(self):\n # ARROW-1357\n df = pd.DataFrame({\n 'lists': np.array([\n [1, 2],\n None,\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9]\n ], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n t1 = pa.Table.from_pandas(df[:2], schema=schema)\n t2 = pa.Table.from_pandas(df[2:], schema=schema)\n\n table = pa.concat_tables([t1, t2])\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_empty_column_of_lists_chunked(self):\n df = pd.DataFrame({\n 'lists': np.array([], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n table = pa.Table.from_pandas(df, schema=schema)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_column_of_lists_chunked2(self):\n data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],\n [12, 13], [14, 15], [16, 17]]\n data2 = [[8, 9], [18, 19]]\n\n a1 = pa.array(data1)\n a2 = pa.array(data2)\n\n t1 = pa.Table.from_arrays([a1], names=['a'])\n t2 = pa.Table.from_arrays([a2], names=['a'])\n\n concatenated = pa.concat_tables([t1, t2])\n\n result = concatenated.to_pandas()\n expected = pd.DataFrame({'a': data1 + data2})\n\n tm.assert_frame_equal(result, expected)\n\n def test_column_of_lists_strided(self):\n df, schema = dataframe_with_lists()\n df = pd.concat([df] * 6, ignore_index=True)\n\n arr = df['int64'].values[::3]\n assert arr.strides[0] != 8\n\n _check_array_roundtrip(arr)\n\n def test_nested_lists_all_none(self):\n data = np.array([[None, None], None], dtype=object)\n\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n data2 = np.array([None, None, [None, None],\n np.array([None, None], dtype=object)],\n dtype=object)\n arr = pa.array(data2)\n expected = pa.array([None, None, [None, None], [None, None]])\n assert arr.equals(expected)\n\n def test_nested_lists_all_empty(self):\n # ARROW-2128\n data = pd.Series([[], [], []])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n def test_nested_list_first_empty(self):\n # ARROW-2711\n data = pd.Series([[], [\"a\"]])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.string())\n\n def test_nested_smaller_ints(self):\n # ARROW-1345, ARROW-2008, there were some type inference bugs happening\n # before\n data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])\n result = pa.array(data)\n result2 = pa.array(data.values)\n expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))\n assert result.equals(expected)\n assert result2.equals(expected)\n\n data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])\n result3 = pa.array(data3)\n expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))\n assert result3.equals(expected3)\n\n def test_infer_lists(self):\n data = OrderedDict([\n ('nan_ints', [[None, 1], [2, 3]]),\n ('ints', [[0, 1], [2, 3]]),\n ('strs', [[None, 'b'], ['c', 'd']]),\n ('nested_strs', [[[None, 'b'], ['c', 'd']], None])\n ])\n df = 
pd.DataFrame(data)\n\n expected_schema = pa.schema([\n pa.field('nan_ints', pa.list_(pa.int64())),\n pa.field('ints', pa.list_(pa.int64())),\n pa.field('strs', pa.list_(pa.string())),\n pa.field('nested_strs', pa.list_(pa.list_(pa.string())))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_fixed_size_list(self):\n # ARROW-7365\n fixed_ty = pa.list_(pa.int64(), list_size=4)\n variable_ty = pa.list_(pa.int64())\n\n data = [[0, 1, 2, 3], None, [4, 5, 6, 7], [8, 9, 10, 11]]\n fixed_arr = pa.array(data, type=fixed_ty)\n variable_arr = pa.array(data, type=variable_ty)\n\n result = fixed_arr.to_pandas()\n expected = variable_arr.to_pandas()\n\n for left, right in zip(result, expected):\n if left is None:\n assert right is None\n npt.assert_array_equal(left, right)\n\n def test_infer_numpy_array(self):\n data = OrderedDict([\n ('ints', [\n np.array([0, 1], dtype=np.int64),\n np.array([2, 3], dtype=np.int64)\n ])\n ])\n df = pd.DataFrame(data)\n expected_schema = pa.schema([\n pa.field('ints', pa.list_(pa.int64()))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_to_list_of_structs_pandas(self):\n ints = pa.array([1, 2, 3], pa.int32())\n strings = pa.array([['a', 'b'], ['c', 'd'], ['e', 'f']],\n pa.list_(pa.string()))\n structs = pa.StructArray.from_arrays([ints, strings], ['f1', 'f2'])\n data = pa.ListArray.from_arrays([0, 1, 3], structs)\n\n expected = pd.Series([\n [{'f1': 1, 'f2': ['a', 'b']}],\n [{'f1': 2, 'f2': ['c', 'd']},\n {'f1': 3, 'f2': ['e', 'f']}]\n ])\n\n series = pd.Series(data.to_pandas())\n tm.assert_series_equal(series, expected)\n\n @pytest.mark.parametrize('t,data,expected', [\n (\n pa.int64,\n [[1, 2], [3], None],\n [None, [3], None]\n ),\n (\n pa.string,\n [['aaa', 'bb'], ['c'], None],\n [None, ['c'], None]\n ),\n (\n pa.null,\n [[None, None], [None], None],\n [None, [None], None]\n )\n ])\n def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):\n m = np.array([True, False, True])\n\n s = pd.Series(data)\n result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))\n\n assert pa.Array.from_pandas(expected,\n type=pa.list_(t())).equals(result)\n\n def test_empty_list_roundtrip(self):\n empty_list_array = np.empty((3,), dtype=object)\n empty_list_array.fill([])\n\n df = pd.DataFrame({'a': np.array(['1', '2', '3']),\n 'b': empty_list_array})\n tbl = pa.Table.from_pandas(df)\n\n result = tbl.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_array_from_nested_arrays(self):\n df, schema = dataframe_with_arrays()\n for field in schema:\n arr = df[field.name].values\n expected = pa.array(list(arr), type=field.type)\n result = pa.array(arr)\n assert result.type == field.type # == list<scalar>\n assert result.equals(expected)\n\n def test_nested_large_list(self):\n s = (pa.array([[[1, 2, 3], [4]], None],\n type=pa.large_list(pa.large_list(pa.int64())))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[[1, 2, 3], [4]], None]),\n check_names=False)\n\n def test_large_binary_list(self):\n for list_type_factory in (pa.list_, pa.large_list):\n s = (pa.array([[\"aa\", \"bb\"], None, [\"cc\"], []],\n type=list_type_factory(pa.large_binary()))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[b\"aa\", b\"bb\"], None, [b\"cc\"], []]),\n check_names=False)\n s = (pa.array([[\"aa\", \"bb\"], None, [\"cc\"], []],\n type=list_type_factory(pa.large_string()))\n .to_pandas())\n tm.assert_series_equal(\n s, pd.Series([[\"aa\", \"bb\"], None, [\"cc\"], []]),\n 
check_names=False)\n\n\nclass TestConvertStructTypes:\n \"\"\"\n Conversion tests for struct types.\n \"\"\"\n\n def test_pandas_roundtrip(self):\n df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n\n expected_schema = pa.schema([\n ('dicts', pa.struct([('a', pa.int64()), ('b', pa.int64())])),\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n # specifying schema explicitly in from_pandas\n _check_pandas_roundtrip(\n df, schema=expected_schema, expected_schema=expected_schema)\n\n def test_to_pandas(self):\n ints = pa.array([None, 2, 3], type=pa.int64())\n strs = pa.array(['a', None, 'c'], type=pa.string())\n bools = pa.array([True, False, None], type=pa.bool_())\n arr = pa.StructArray.from_arrays(\n [ints, strs, bools],\n ['ints', 'strs', 'bools'])\n\n expected = pd.Series([\n {'ints': None, 'strs': 'a', 'bools': True},\n {'ints': 2, 'strs': None, 'bools': False},\n {'ints': 3, 'strs': 'c', 'bools': None},\n ])\n\n series = pd.Series(arr.to_pandas())\n tm.assert_series_equal(series, expected)\n\n def test_from_numpy(self):\n dt = np.dtype([('x', np.int32),\n (('y_title', 'y'), np.bool_)])\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(42, True), (43, False)], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True},\n {'x': 43, 'y': False}]\n\n # With mask\n arr = pa.array(data, mask=np.bool_([False, True]), type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True}, None]\n\n # Trivial struct type\n dt = np.dtype([])\n ty = pa.struct([])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(), ()], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{}, {}]\n\n def test_from_numpy_nested(self):\n # Note: an object field inside a struct\n dt = np.dtype([('x', np.dtype([('xx', np.int8),\n ('yy', np.bool_)])),\n ('y', np.int16),\n ('z', np.object_)])\n # Note: itemsize is not a multiple of sizeof(object)\n assert dt.itemsize == 12\n ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),\n pa.field('yy', pa.bool_())])),\n pa.field('y', pa.int16()),\n pa.field('z', pa.string())])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([\n ((1, True), 2, 'foo'),\n ((3, False), 4, 'bar')], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [\n {'x': {'xx': 1, 'yy': True}, 'y': 2, 'z': 'foo'},\n {'x': {'xx': 3, 'yy': False}, 'y': 4, 'z': 'bar'}]\n\n @pytest.mark.large_memory\n def test_from_numpy_large(self):\n # Exercise rechunking + nulls\n target_size = 3 * 1024**3 # 4GB\n dt = np.dtype([('x', np.float64), ('y', 'object')])\n bs = 65536 - dt.itemsize\n block = b'.' 
* bs\n n = target_size // (bs + dt.itemsize)\n data = np.zeros(n, dtype=dt)\n data['x'] = np.random.random_sample(n)\n data['y'] = block\n # Add implicit nulls\n data['x'][data['x'] < 0.2] = np.nan\n\n ty = pa.struct([pa.field('x', pa.float64()),\n pa.field('y', pa.binary())])\n arr = pa.array(data, type=ty, from_pandas=True)\n assert arr.num_chunks == 2\n\n def iter_chunked_array(arr):\n for chunk in arr.iterchunks():\n yield from chunk\n\n def check(arr, data, mask=None):\n assert len(arr) == len(data)\n xs = data['x']\n ys = data['y']\n for i, obj in enumerate(iter_chunked_array(arr)):\n try:\n d = obj.as_py()\n if mask is not None and mask[i]:\n assert d is None\n else:\n x = xs[i]\n if np.isnan(x):\n assert d['x'] is None\n else:\n assert d['x'] == x\n assert d['y'] == ys[i]\n except Exception:\n print(\"Failed at index\", i)\n raise\n\n check(arr, data)\n del arr\n\n # Now with explicit mask\n mask = np.random.random_sample(n) < 0.2\n arr = pa.array(data, type=ty, mask=mask, from_pandas=True)\n assert arr.num_chunks == 2\n\n check(arr, data, mask)\n del arr\n\n def test_from_numpy_bad_input(self):\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n dt = np.dtype([('x', np.int32),\n ('z', np.bool_)])\n\n data = np.array([], dtype=dt)\n with pytest.raises(ValueError,\n match=\"Missing field 'y'\"):\n pa.array(data, type=ty)\n data = np.int32([])\n with pytest.raises(TypeError,\n match=\"Expected struct array\"):\n pa.array(data, type=ty)\n\n def test_from_tuples(self):\n df = pd.DataFrame({'tuples': [(1, 2), (3, 4)]})\n expected_df = pd.DataFrame(\n {'tuples': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})\n\n # conversion from tuples works when specifying expected struct type\n struct_type = pa.struct([('a', pa.int64()), ('b', pa.int64())])\n\n arr = np.asarray(df['tuples'])\n _check_array_roundtrip(\n arr, expected=expected_df['tuples'], type=struct_type)\n\n expected_schema = pa.schema([('tuples', struct_type)])\n _check_pandas_roundtrip(\n df, expected=expected_df, schema=expected_schema,\n expected_schema=expected_schema)\n\n\nclass TestZeroCopyConversion:\n \"\"\"\n Tests that zero-copy conversion works with some types.\n \"\"\"\n\n def test_zero_copy_success(self):\n result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, [0, 1, 2])\n\n def test_zero_copy_dictionaries(self):\n arr = pa.DictionaryArray.from_arrays(\n np.array([0, 0]),\n np.array([5]))\n\n result = arr.to_pandas(zero_copy_only=True)\n values = pd.Categorical([5, 5])\n\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n\n def test_zero_copy_timestamp(self):\n arr = np.array(['2007-07-13'], dtype='datetime64[ns]')\n result = pa.array(arr).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, arr)\n\n def test_zero_copy_duration(self):\n arr = np.array([1], dtype='timedelta64[ns]')\n result = pa.array(arr).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, arr)\n\n def check_zero_copy_failure(self, arr):\n with pytest.raises(pa.ArrowInvalid):\n arr.to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_object_types(self):\n self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))\n\n def test_zero_copy_failure_with_int_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0, 1, None]))\n\n def test_zero_copy_failure_with_float_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))\n\n def test_zero_copy_failure_on_bool_types(self):\n 
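# Arrow bit-packs booleans, so converting to a NumPy bool array always requires a copy\n        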
self.check_zero_copy_failure(pa.array([True, False]))\n\n def test_zero_copy_failure_on_list_types(self):\n arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))\n self.check_zero_copy_failure(arr)\n\n def test_zero_copy_failure_on_timestamp_with_nulls(self):\n arr = np.array([1, None], dtype='datetime64[ns]')\n self.check_zero_copy_failure(pa.array(arr))\n\n def test_zero_copy_failure_on_duration_with_nulls(self):\n arr = np.array([1, None], dtype='timedelta64[ns]')\n self.check_zero_copy_failure(pa.array(arr))\n\n\ndef _non_threaded_conversion():\n df = _alltypes_example()\n _check_pandas_roundtrip(df, use_threads=False)\n _check_pandas_roundtrip(df, use_threads=False, as_batch=True)\n\n\ndef _threaded_conversion():\n df = _alltypes_example()\n _check_pandas_roundtrip(df, use_threads=True)\n _check_pandas_roundtrip(df, use_threads=True, as_batch=True)\n\n\nclass TestConvertMisc:\n \"\"\"\n Miscellaneous conversion tests.\n \"\"\"\n\n type_pairs = [\n (np.int8, pa.int8()),\n (np.int16, pa.int16()),\n (np.int32, pa.int32()),\n (np.int64, pa.int64()),\n (np.uint8, pa.uint8()),\n (np.uint16, pa.uint16()),\n (np.uint32, pa.uint32()),\n (np.uint64, pa.uint64()),\n (np.float16, pa.float16()),\n (np.float32, pa.float32()),\n (np.float64, pa.float64()),\n # XXX unsupported\n # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),\n (np.object, pa.string()),\n (np.object, pa.binary()),\n (np.object, pa.binary(10)),\n (np.object, pa.list_(pa.int64())),\n ]\n\n def test_all_none_objects(self):\n df = pd.DataFrame({'a': [None, None, None]})\n _check_pandas_roundtrip(df)\n\n def test_all_none_category(self):\n df = pd.DataFrame({'a': [None, None, None]})\n df['a'] = df['a'].astype('category')\n _check_pandas_roundtrip(df)\n\n def test_empty_arrays(self):\n for dtype, pa_type in self.type_pairs:\n arr = np.array([], dtype=dtype)\n _check_array_roundtrip(arr, type=pa_type)\n\n def test_non_threaded_conversion(self):\n _non_threaded_conversion()\n\n def test_threaded_conversion_multiprocess(self):\n # Parallel conversion should work from child processes too (ARROW-2963)\n pool = mp.Pool(2)\n try:\n pool.apply(_threaded_conversion)\n finally:\n pool.close()\n pool.join()\n\n def test_category(self):\n repeats = 5\n v1 = ['foo', None, 'bar', 'qux', np.nan]\n v2 = [4, 5, 6, 7, 8]\n v3 = [b'foo', None, b'bar', b'qux', np.nan]\n\n arrays = {\n 'cat_strings': pd.Categorical(v1 * repeats),\n 'cat_strings_with_na': pd.Categorical(v1 * repeats,\n categories=['foo', 'bar']),\n 'cat_ints': pd.Categorical(v2 * repeats),\n 'cat_binary': pd.Categorical(v3 * repeats),\n 'cat_strings_ordered': pd.Categorical(\n v1 * repeats, categories=['bar', 'qux', 'foo'],\n ordered=True),\n 'ints': v2 * repeats,\n 'ints2': v2 * repeats,\n 'strings': v1 * repeats,\n 'strings2': v1 * repeats,\n 'strings3': v3 * repeats}\n df = pd.DataFrame(arrays)\n _check_pandas_roundtrip(df)\n\n for k in arrays:\n _check_array_roundtrip(arrays[k])\n\n def test_category_implicit_from_pandas(self):\n # ARROW-3374\n def _check(v):\n arr = pa.array(v)\n result = arr.to_pandas()\n tm.assert_series_equal(pd.Series(result), pd.Series(v))\n\n arrays = [\n pd.Categorical(['a', 'b', 'c'], categories=['a', 'b']),\n pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'],\n ordered=True)\n ]\n for arr in arrays:\n _check(arr)\n\n def test_empty_category(self):\n # ARROW-2443\n df = pd.DataFrame({'cat': pd.Categorical([])})\n _check_pandas_roundtrip(df)\n\n def test_category_zero_chunks(self):\n # ARROW-5952\n for pa_type, dtype in [(pa.string(), 
'object'), (pa.int64(), 'int64')]:\n a = pa.chunked_array([], pa.dictionary(pa.int8(), pa_type))\n result = a.to_pandas()\n expected = pd.Categorical([], categories=np.array([], dtype=dtype))\n tm.assert_series_equal(pd.Series(result), pd.Series(expected))\n\n table = pa.table({'a': a})\n result = table.to_pandas()\n expected = pd.DataFrame({'a': expected})\n tm.assert_frame_equal(result, expected)\n\n def test_mixed_types_fails(self):\n data = pd.DataFrame({'a': ['a', 1, 2.0]})\n with pytest.raises(pa.ArrowTypeError):\n pa.Table.from_pandas(data)\n\n data = pd.DataFrame({'a': [1, True]})\n with pytest.raises(pa.ArrowTypeError):\n pa.Table.from_pandas(data)\n\n data = pd.DataFrame({'a': ['a', 1, 2.0]})\n expected_msg = 'Conversion failed for column a'\n with pytest.raises(pa.ArrowTypeError, match=expected_msg):\n pa.Table.from_pandas(data)\n\n def test_strided_data_import(self):\n cases = []\n\n columns = ['a', 'b', 'c']\n N, K = 100, 3\n random_numbers = np.random.randn(N, K).copy() * 100\n\n numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'f4', 'f8']\n\n for type_name in numeric_dtypes:\n cases.append(random_numbers.astype(type_name))\n\n # strings\n cases.append(np.array([random_ascii(10) for i in range(N * K)],\n dtype=object)\n .reshape(N, K).copy())\n\n # booleans\n boolean_objects = (np.array([True, False, True] * N, dtype=object)\n .reshape(N, K).copy())\n\n # add some nulls, so dtype comes back as objects\n boolean_objects[5] = None\n cases.append(boolean_objects)\n\n cases.append(np.arange(\"2016-01-01T00:00:00.001\", N * K,\n dtype='datetime64[ms]')\n .reshape(N, K).copy())\n\n strided_mask = (random_numbers > 0).astype(bool)[:, 0]\n\n for case in cases:\n df = pd.DataFrame(case, columns=columns)\n col = df['a']\n\n _check_pandas_roundtrip(df)\n _check_array_roundtrip(col)\n _check_array_roundtrip(col, mask=strided_mask)\n\n def test_all_nones(self):\n def _check_series(s):\n converted = pa.array(s)\n assert isinstance(converted, pa.NullArray)\n assert len(converted) == 3\n assert converted.null_count == 3\n for item in converted:\n assert item is pa.NA\n\n _check_series(pd.Series([None] * 3, dtype=object))\n _check_series(pd.Series([np.nan] * 3, dtype=object))\n _check_series(pd.Series([None, np.nan, None], dtype=object))\n\n def test_partial_schema(self):\n data = OrderedDict([\n ('a', [0, 1, 2, 3, 4]),\n ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),\n ('c', [-10, -5, 0, 5, 10])\n ])\n df = pd.DataFrame(data)\n\n partial_schema = pa.schema([\n pa.field('c', pa.int64()),\n pa.field('a', pa.int64())\n ])\n\n _check_pandas_roundtrip(df, schema=partial_schema,\n expected=df[['c', 'a']],\n expected_schema=partial_schema)\n\n def test_table_batch_empty_dataframe(self):\n df = pd.DataFrame({})\n _check_pandas_roundtrip(df)\n _check_pandas_roundtrip(df, as_batch=True)\n\n df2 = pd.DataFrame({}, index=[0, 1, 2])\n _check_pandas_roundtrip(df2, preserve_index=True)\n _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)\n\n def test_convert_empty_table(self):\n arr = pa.array([], type=pa.int64())\n empty_objects = pd.Series(np.array([], dtype=object))\n tm.assert_series_equal(arr.to_pandas(),\n pd.Series(np.array([], dtype=np.int64)))\n arr = pa.array([], type=pa.string())\n tm.assert_series_equal(arr.to_pandas(), empty_objects)\n arr = pa.array([], type=pa.list_(pa.int64()))\n tm.assert_series_equal(arr.to_pandas(), empty_objects)\n arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))\n tm.assert_series_equal(arr.to_pandas(), 
empty_objects)\n\n def test_non_natural_stride(self):\n \"\"\"\n ARROW-2172: converting from a Numpy array with a stride that's\n not a multiple of itemsize.\n \"\"\"\n dtype = np.dtype([('x', np.int32), ('y', np.int16)])\n data = np.array([(42, -1), (-43, 2)], dtype=dtype)\n assert data.strides == (6,)\n arr = pa.array(data['x'], type=pa.int32())\n assert arr.to_pylist() == [42, -43]\n arr = pa.array(data['y'], type=pa.int16())\n assert arr.to_pylist() == [-1, 2]\n\n def test_array_from_strided_numpy_array(self):\n # ARROW-5651\n np_arr = np.arange(0, 10, dtype=np.float32)[1:-1:2]\n pa_arr = pa.array(np_arr, type=pa.float64())\n expected = pa.array([1.0, 3.0, 5.0, 7.0], type=pa.float64())\n pa_arr.equals(expected)\n\n def test_safe_unsafe_casts(self):\n # ARROW-2799\n df = pd.DataFrame({\n 'A': list('abc'),\n 'B': np.linspace(0, 1, 3)\n })\n\n schema = pa.schema([\n pa.field('A', pa.string()),\n pa.field('B', pa.int32())\n ])\n\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df, schema=schema)\n\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n assert table.column('B').type == pa.int32()\n\n def test_error_sparse(self):\n # ARROW-2818\n df = pd.DataFrame({'a': pd.SparseArray([1, np.nan, 3])})\n with pytest.raises(TypeError, match=\"Sparse pandas data\"):\n pa.Table.from_pandas(df)\n\n\ndef test_safe_cast_from_float_with_nans_to_int():\n # TODO(kszucs): write tests for creating Date32 and Date64 arrays, see\n # ARROW-4258 and https://github.com/apache/arrow/pull/3395\n values = pd.Series([1, 2, None, 4])\n arr = pa.Array.from_pandas(values, type=pa.int32(), safe=True)\n expected = pa.array([1, 2, None, 4], type=pa.int32())\n assert arr.equals(expected)\n\n\ndef _fully_loaded_dataframe_example():\n index = pd.MultiIndex.from_arrays([\n pd.date_range('2000-01-01', periods=5).repeat(2),\n np.tile(np.array(['foo', 'bar'], dtype=object), 5)\n ])\n\n c1 = pd.date_range('2000-01-01', periods=10)\n data = {\n 0: c1,\n 1: c1.tz_localize('utc'),\n 2: c1.tz_localize('US/Eastern'),\n 3: c1[::2].tz_localize('utc').repeat(2).astype('category'),\n 4: ['foo', 'bar'] * 5,\n 5: pd.Series(['foo', 'bar'] * 5).astype('category').values,\n 6: [True, False] * 5,\n 7: np.random.randn(10),\n 8: np.random.randint(0, 100, size=10),\n 9: pd.period_range('2013', periods=10, freq='M')\n }\n\n if LooseVersion(pd.__version__) >= '0.21':\n # There is an issue with pickling IntervalIndex in pandas 0.20.x\n data[10] = pd.interval_range(start=1, freq=1, periods=10)\n\n return pd.DataFrame(data, index=index)\n\n\[email protected]('columns', ([b'foo'], ['foo']))\ndef test_roundtrip_with_bytes_unicode(columns):\n df = pd.DataFrame(columns=columns)\n table1 = pa.Table.from_pandas(df)\n table2 = pa.Table.from_pandas(table1.to_pandas())\n assert table1.equals(table2)\n assert table1.schema.equals(table2.schema)\n assert table1.schema.metadata == table2.schema.metadata\n\n\ndef _check_serialize_components_roundtrip(pd_obj):\n ctx = pa.default_serialization_context()\n\n components = ctx.serialize(pd_obj).to_components()\n deserialized = ctx.deserialize_components(components)\n\n if isinstance(pd_obj, pd.DataFrame):\n tm.assert_frame_equal(pd_obj, deserialized)\n else:\n tm.assert_series_equal(pd_obj, deserialized)\n\n\[email protected](LooseVersion(np.__version__) >= '0.16',\n reason='Until numpy/numpy#12745 is resolved')\ndef test_serialize_deserialize_pandas():\n # ARROW-1784, serialize and deserialize DataFrame by decomposing\n # BlockManager\n df = _fully_loaded_dataframe_example()\n 
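# Round-trip the fully loaded example DataFrame through serialization components\n    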
_check_serialize_components_roundtrip(df)\n\n\ndef test_serialize_deserialize_empty_pandas():\n # ARROW-7996, serialize and deserialize empty pandas objects\n df = pd.DataFrame({'col1': [], 'col2': [], 'col3': []})\n _check_serialize_components_roundtrip(df)\n\n series = pd.Series([], dtype=np.float32, name='col')\n _check_serialize_components_roundtrip(series)\n\n\ndef _pytime_from_micros(val):\n microseconds = val % 1000000\n val //= 1000000\n seconds = val % 60\n val //= 60\n minutes = val % 60\n hours = val // 60\n return time(hours, minutes, seconds, microseconds)\n\n\ndef _pytime_to_micros(pytime):\n return (pytime.hour * 3600000000 +\n pytime.minute * 60000000 +\n pytime.second * 1000000 +\n pytime.microsecond)\n\n\ndef test_convert_unsupported_type_error_message():\n # ARROW-1454\n\n # custom python objects\n class A:\n pass\n\n df = pd.DataFrame({'a': [A(), A()]})\n\n msg = 'Conversion failed for column a with type object'\n with pytest.raises(ValueError, match=msg):\n pa.Table.from_pandas(df)\n\n # period unsupported for pandas <= 0.25\n if LooseVersion(pd.__version__) <= '0.25':\n df = pd.DataFrame({\n 'a': pd.period_range('2000-01-01', periods=20),\n })\n\n msg = 'Conversion failed for column a with type (period|object)'\n with pytest.raises((TypeError, ValueError), match=msg):\n pa.Table.from_pandas(df)\n\n\n# ----------------------------------------------------------------------\n# Test object deduplication in to_pandas\n\n\ndef _generate_dedup_example(nunique, repeats):\n unique_values = [rands(10) for i in range(nunique)]\n return unique_values * repeats\n\n\ndef _assert_nunique(obj, expected):\n assert len({id(x) for x in obj}) == expected\n\n\ndef test_to_pandas_deduplicate_strings_array_types():\n nunique = 100\n repeats = 10\n values = _generate_dedup_example(nunique, repeats)\n\n for arr in [pa.array(values, type=pa.binary()),\n pa.array(values, type=pa.utf8()),\n pa.chunked_array([values, values])]:\n _assert_nunique(arr.to_pandas(), nunique)\n _assert_nunique(arr.to_pandas(deduplicate_objects=False), len(arr))\n\n\ndef test_to_pandas_deduplicate_strings_table_types():\n nunique = 100\n repeats = 10\n values = _generate_dedup_example(nunique, repeats)\n\n arr = pa.array(values)\n rb = pa.RecordBatch.from_arrays([arr], ['foo'])\n tbl = pa.Table.from_batches([rb])\n\n for obj in [rb, tbl]:\n _assert_nunique(obj.to_pandas()['foo'], nunique)\n _assert_nunique(obj.to_pandas(deduplicate_objects=False)['foo'],\n len(obj))\n\n\ndef test_to_pandas_deduplicate_integers_as_objects():\n nunique = 100\n repeats = 10\n\n # Python automatically interns smaller integers\n unique_values = list(np.random.randint(10000000, 1000000000, size=nunique))\n unique_values[nunique // 2] = None\n\n arr = pa.array(unique_values * repeats)\n\n _assert_nunique(arr.to_pandas(integer_object_nulls=True), nunique)\n _assert_nunique(arr.to_pandas(integer_object_nulls=True,\n deduplicate_objects=False),\n # Account for None\n (nunique - 1) * repeats + 1)\n\n\ndef test_to_pandas_deduplicate_date_time():\n nunique = 100\n repeats = 10\n\n unique_values = list(range(nunique))\n\n cases = [\n # raw type, array type, to_pandas options\n ('int32', 'date32', {'date_as_object': True}),\n ('int64', 'date64', {'date_as_object': True}),\n ('int32', 'time32[ms]', {}),\n ('int64', 'time64[us]', {})\n ]\n\n for raw_type, array_type, pandas_options in cases:\n raw_arr = pa.array(unique_values * repeats, type=raw_type)\n casted_arr = raw_arr.cast(array_type)\n\n _assert_nunique(casted_arr.to_pandas(**pandas_options),\n 
nunique)\n _assert_nunique(casted_arr.to_pandas(deduplicate_objects=False,\n **pandas_options),\n len(casted_arr))\n\n\n# ---------------------------------------------------------------------\n\ndef test_table_from_pandas_checks_field_nullability():\n # ARROW-2136\n df = pd.DataFrame({'a': [1.2, 2.1, 3.1],\n 'b': [np.nan, 'string', 'foo']})\n schema = pa.schema([pa.field('a', pa.float64(), nullable=False),\n pa.field('b', pa.utf8(), nullable=False)])\n\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df, schema=schema)\n\n\ndef test_table_from_pandas_keeps_column_order_of_dataframe():\n df1 = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n df2 = df1[['floats', 'partition', 'arrays']]\n\n schema1 = pa.schema([\n ('partition', pa.int64()),\n ('arrays', pa.list_(pa.int64())),\n ('floats', pa.float64()),\n ])\n schema2 = pa.schema([\n ('floats', pa.float64()),\n ('partition', pa.int64()),\n ('arrays', pa.list_(pa.int64()))\n ])\n\n table1 = pa.Table.from_pandas(df1, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, preserve_index=False)\n\n assert table1.schema.equals(schema1)\n assert table2.schema.equals(schema2)\n\n\ndef test_table_from_pandas_keeps_column_order_of_schema():\n # ARROW-3766\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n\n schema = pa.schema([\n ('floats', pa.float64()),\n ('arrays', pa.list_(pa.int32())),\n ('partition', pa.int32())\n ])\n\n df1 = df[df.partition == 0]\n df2 = df[df.partition == 1][['floats', 'partition', 'arrays']]\n\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert table1.schema.equals(schema)\n assert table1.schema.equals(table2.schema)\n\n\ndef test_table_from_pandas_columns_argument_only_does_filtering():\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n\n columns1 = ['arrays', 'floats', 'partition']\n schema1 = pa.schema([\n ('arrays', pa.list_(pa.int64())),\n ('floats', pa.float64()),\n ('partition', pa.int64())\n ])\n\n columns2 = ['floats', 'partition']\n schema2 = pa.schema([\n ('floats', pa.float64()),\n ('partition', pa.int64())\n ])\n\n table1 = pa.Table.from_pandas(df, columns=columns1, preserve_index=False)\n table2 = pa.Table.from_pandas(df, columns=columns2, preserve_index=False)\n\n assert table1.schema.equals(schema1)\n assert table2.schema.equals(schema2)\n\n\ndef test_table_from_pandas_columns_and_schema_are_mutually_exclusive():\n df = pd.DataFrame(OrderedDict([\n ('partition', [0, 0, 1, 1]),\n ('arrays', [[0, 1, 2], [3, 4], None, None]),\n ('floats', [None, None, 1.1, 3.3])\n ]))\n schema = pa.schema([\n ('partition', pa.int32()),\n ('arrays', pa.list_(pa.int32())),\n ('floats', pa.float64()),\n ])\n columns = ['arrays', 'floats']\n\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df, schema=schema, columns=columns)\n\n\ndef test_table_from_pandas_keeps_schema_nullability():\n # ARROW-5169\n df = pd.DataFrame({'a': [1, 2, 3, 4]})\n\n schema = pa.schema([\n pa.field('a', pa.int64(), nullable=False),\n ])\n\n table = pa.Table.from_pandas(df)\n assert table.schema.field('a').nullable is True\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.field('a').nullable is 
False\n\n\ndef test_table_from_pandas_schema_index_columns():\n # ARROW-5220\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})\n\n schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ('index', pa.int32()),\n ])\n\n # schema includes index with name not in dataframe\n with pytest.raises(KeyError, match=\"name 'index' present in the\"):\n pa.Table.from_pandas(df, schema=schema)\n\n df.index.name = 'index'\n\n # schema includes correct index name -> roundtrip works\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema includes correct index name but preserve_index=False\n with pytest.raises(ValueError, match=\"'preserve_index=False' was\"):\n pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # in case of preserve_index=None -> RangeIndex serialized as metadata\n # clashes with the index in the schema\n with pytest.raises(ValueError, match=\"name 'index' is present in the \"\n \"schema, but it is a RangeIndex\"):\n pa.Table.from_pandas(df, schema=schema, preserve_index=None)\n\n df.index = pd.Index([0, 1, 2], name='index')\n\n # for non-RangeIndex, both preserve_index=None and True work\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema has different order (index column not at the end)\n schema = pa.schema([\n ('index', pa.int32()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n\n # schema does not include the index -> index is not included as column\n # even though preserve_index=True/None\n schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n expected = df.copy()\n expected = expected.reset_index(drop=True)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema, expected=expected)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema, expected=expected)\n\n # dataframe with a MultiIndex\n df.index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],\n names=['level1', 'level2'])\n schema = pa.schema([\n ('level1', pa.string()),\n ('level2', pa.int64()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema)\n\n # only one of the levels of the MultiIndex is included\n schema = pa.schema([\n ('level2', pa.int64()),\n ('a', pa.int64()),\n ('b', pa.float64()),\n ])\n expected = df.copy()\n expected = expected.reset_index('level1', drop=True)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=True,\n expected_schema=schema, expected=expected)\n _check_pandas_roundtrip(df, schema=schema, preserve_index=None,\n expected_schema=schema, expected=expected)\n\n\ndef test_table_from_pandas_schema_index_columns__unnamed_index():\n # ARROW-6999 - unnamed indices in specified schema\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})\n\n expected_schema = pa.schema([\n ('a', pa.int64()),\n ('b', pa.float64()),\n ('__index_level_0__', pa.int64()),\n ])\n\n schema = pa.Schema.from_pandas(df, preserve_index=True)\n table = pa.Table.from_pandas(df, preserve_index=True, schema=schema)\n assert 
table.schema.remove_metadata().equals(expected_schema)\n\n # non-RangeIndex (preserved by default)\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]}, index=[0, 1, 2])\n schema = pa.Schema.from_pandas(df)\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.remove_metadata().equals(expected_schema)\n\n\ndef test_table_from_pandas_schema_with_custom_metadata():\n # ARROW-7087 - metadata disappear from pandas\n df = pd.DataFrame()\n schema = pa.Schema.from_pandas(df).with_metadata({'meta': 'True'})\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema.metadata.get(b'meta') == b'True'\n\n\n# ----------------------------------------------------------------------\n# RecordBatch, Table\n\n\ndef test_recordbatch_from_to_pandas():\n data = pd.DataFrame({\n 'c1': np.array([1, 2, 3, 4, 5], dtype='int64'),\n 'c2': np.array([1, 2, 3, 4, 5], dtype='uint32'),\n 'c3': np.random.randn(5),\n 'c4': ['foo', 'bar', None, 'baz', 'qux'],\n 'c5': [False, True, False, True, False]\n })\n\n batch = pa.RecordBatch.from_pandas(data)\n result = batch.to_pandas()\n tm.assert_frame_equal(data, result)\n\n\ndef test_recordbatchlist_to_pandas():\n data1 = pd.DataFrame({\n 'c1': np.array([1, 1, 2], dtype='uint32'),\n 'c2': np.array([1.0, 2.0, 3.0], dtype='float64'),\n 'c3': [True, None, False],\n 'c4': ['foo', 'bar', None]\n })\n\n data2 = pd.DataFrame({\n 'c1': np.array([3, 5], dtype='uint32'),\n 'c2': np.array([4.0, 5.0], dtype='float64'),\n 'c3': [True, True],\n 'c4': ['baz', 'qux']\n })\n\n batch1 = pa.RecordBatch.from_pandas(data1)\n batch2 = pa.RecordBatch.from_pandas(data2)\n\n table = pa.Table.from_batches([batch1, batch2])\n result = table.to_pandas()\n data = pd.concat([data1, data2]).reset_index(drop=True)\n tm.assert_frame_equal(data, result)\n\n\ndef test_recordbatch_table_pass_name_to_pandas():\n rb = pa.record_batch([pa.array([1, 2, 3, 4])], names=['a0'])\n t = pa.table([pa.array([1, 2, 3, 4])], names=['a0'])\n assert rb[0].to_pandas().name == 'a0'\n assert t[0].to_pandas().name == 'a0'\n\n\n# ----------------------------------------------------------------------\n# Metadata serialization\n\n\[email protected](\n ('type', 'expected'),\n [\n (pa.null(), 'empty'),\n (pa.bool_(), 'bool'),\n (pa.int8(), 'int8'),\n (pa.int16(), 'int16'),\n (pa.int32(), 'int32'),\n (pa.int64(), 'int64'),\n (pa.uint8(), 'uint8'),\n (pa.uint16(), 'uint16'),\n (pa.uint32(), 'uint32'),\n (pa.uint64(), 'uint64'),\n (pa.float16(), 'float16'),\n (pa.float32(), 'float32'),\n (pa.float64(), 'float64'),\n (pa.date32(), 'date'),\n (pa.date64(), 'date'),\n (pa.binary(), 'bytes'),\n (pa.binary(length=4), 'bytes'),\n (pa.string(), 'unicode'),\n (pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'),\n (pa.decimal128(18, 3), 'decimal'),\n (pa.timestamp('ms'), 'datetime'),\n (pa.timestamp('us', 'UTC'), 'datetimetz'),\n (pa.time32('s'), 'time'),\n (pa.time64('us'), 'time')\n ]\n)\ndef test_logical_type(type, expected):\n assert get_logical_type(type) == expected\n\n\n# ----------------------------------------------------------------------\n# to_pandas uses MemoryPool\n\ndef test_array_uses_memory_pool():\n # ARROW-6570\n N = 10000\n arr = pa.array(np.arange(N, dtype=np.int64),\n mask=np.random.randint(0, 2, size=N).astype(np.bool_))\n\n # In the case the gc is caught loafing\n gc.collect()\n\n prior_allocation = pa.total_allocated_bytes()\n\n x = arr.to_pandas()\n assert pa.total_allocated_bytes() == (prior_allocation + N * 8)\n x = None # noqa\n gc.collect()\n\n assert pa.total_allocated_bytes() == 
prior_allocation\n\n # zero copy does not allocate memory\n arr = pa.array(np.arange(N, dtype=np.int64))\n\n prior_allocation = pa.total_allocated_bytes()\n x = arr.to_pandas() # noqa\n assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_singleton_blocks_zero_copy():\n # Part of ARROW-3789\n t = pa.table([pa.array(np.arange(1000, dtype=np.int64))], ['f0'])\n\n # Zero copy if split_blocks=True\n _check_to_pandas_memory_unchanged(t, split_blocks=True)\n\n prior_allocation = pa.total_allocated_bytes()\n result = t.to_pandas()\n assert result['f0'].values.flags.writeable\n assert pa.total_allocated_bytes() > prior_allocation\n\n\ndef _check_to_pandas_memory_unchanged(obj, **kwargs):\n prior_allocation = pa.total_allocated_bytes()\n x = obj.to_pandas(**kwargs) # noqa\n\n # Memory allocation unchanged -- either zero copy or self-destructing\n assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_to_pandas_split_blocks():\n # ARROW-3789\n t = pa.table([\n pa.array([1, 2, 3, 4, 5], type='i1'),\n pa.array([1, 2, 3, 4, 5], type='i4'),\n pa.array([1, 2, 3, 4, 5], type='i8'),\n pa.array([1, 2, 3, 4, 5], type='f4'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n pa.array([1, 2, 3, 4, 5], type='f8'),\n ], ['f{}'.format(i) for i in range(8)])\n\n _check_blocks_created(t, 8)\n _check_to_pandas_memory_unchanged(t, split_blocks=True)\n\n\ndef _check_blocks_created(t, number):\n x = t.to_pandas(split_blocks=True)\n assert len(x._data.blocks) == number\n\n\ndef test_to_pandas_self_destruct():\n K = 50\n\n def _make_table():\n return pa.table([\n # Slice to force a copy\n pa.array(np.random.randn(10000)[::2])\n for i in range(K)\n ], ['f{}'.format(i) for i in range(K)])\n\n t = _make_table()\n _check_to_pandas_memory_unchanged(t, split_blocks=True, self_destruct=True)\n\n # Check non-split-block behavior\n t = _make_table()\n _check_to_pandas_memory_unchanged(t, self_destruct=True)\n\n\ndef test_table_uses_memory_pool():\n N = 10000\n arr = pa.array(np.arange(N, dtype=np.int64))\n t = pa.table([arr, arr, arr], ['f0', 'f1', 'f2'])\n\n prior_allocation = pa.total_allocated_bytes()\n x = t.to_pandas()\n\n assert pa.total_allocated_bytes() == (prior_allocation + 3 * N * 8)\n\n # Check successful garbage collection\n x = None # noqa\n gc.collect()\n assert pa.total_allocated_bytes() == prior_allocation\n\n\ndef test_object_leak_in_numpy_array():\n # ARROW-6876\n arr = pa.array([{'a': 1}])\n np_arr = arr.to_pandas()\n assert np_arr.dtype == np.dtype('object')\n obj = np_arr[0]\n refcount = sys.getrefcount(obj)\n assert sys.getrefcount(obj) == refcount\n del np_arr\n assert sys.getrefcount(obj) == refcount - 1\n\n\ndef test_object_leak_in_dataframe():\n # ARROW-6876\n arr = pa.array([{'a': 1}])\n table = pa.table([arr], ['f0'])\n col = table.to_pandas()['f0']\n assert col.dtype == np.dtype('object')\n obj = col[0]\n refcount = sys.getrefcount(obj)\n assert sys.getrefcount(obj) == refcount\n del col\n assert sys.getrefcount(obj) == refcount - 1\n\n\n# ----------------------------------------------------------------------\n# Some nested array tests array tests\n\n\ndef test_array_from_py_float32():\n data = [[1.2, 3.4], [9.0, 42.0]]\n\n t = pa.float32()\n\n arr1 = pa.array(data[0], type=t)\n arr2 = pa.array(data, type=pa.list_(t))\n\n expected1 = np.array(data[0], dtype=np.float32)\n expected2 = pd.Series([np.array(data[0], dtype=np.float32),\n np.array(data[1], dtype=np.float32)])\n\n assert arr1.type == t\n assert 
arr1.equals(pa.array(expected1))\n assert arr2.equals(pa.array(expected2))\n\n\n# ----------------------------------------------------------------------\n# Timestamp tests\n\n\ndef test_cast_timestamp_unit():\n # ARROW-1680\n val = datetime.now()\n s = pd.Series([val])\n s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York')\n\n us_with_tz = pa.timestamp('us', tz='America/New_York')\n\n arr = pa.Array.from_pandas(s_nyc, type=us_with_tz)\n\n # ARROW-1906\n assert arr.type == us_with_tz\n\n arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us'))\n\n assert arr[0].as_py() == s_nyc[0].to_pydatetime()\n assert arr2[0].as_py() == s[0].to_pydatetime()\n\n # Disallow truncation\n arr = pa.array([123123], type='int64').cast(pa.timestamp('ms'))\n expected = pa.array([123], type='int64').cast(pa.timestamp('s'))\n\n # sanity check that the cast worked right\n assert arr.type == pa.timestamp('ms')\n\n target = pa.timestamp('s')\n with pytest.raises(ValueError):\n arr.cast(target)\n\n result = arr.cast(target, safe=False)\n assert result.equals(expected)\n\n # ARROW-1949\n series = pd.Series([pd.Timestamp(1), pd.Timestamp(10), pd.Timestamp(1000)])\n expected = pa.array([0, 0, 1], type=pa.timestamp('us'))\n\n with pytest.raises(ValueError):\n pa.array(series, type=pa.timestamp('us'))\n\n with pytest.raises(ValueError):\n pa.Array.from_pandas(series, type=pa.timestamp('us'))\n\n result = pa.Array.from_pandas(series, type=pa.timestamp('us'), safe=False)\n assert result.equals(expected)\n\n result = pa.array(series, type=pa.timestamp('us'), safe=False)\n assert result.equals(expected)\n\n\ndef test_struct_with_timestamp_tz():\n # ARROW-7723\n ts = pd.Timestamp.now()\n\n # XXX: Ensure that this data does not get promoted to nanoseconds (and thus\n # integers) to preserve behavior in 0.15.1\n for unit in ['s', 'ms', 'us']:\n arr = pa.array([ts], type=pa.timestamp(unit))\n arr2 = pa.array([ts], type=pa.timestamp(unit, tz='America/New_York'))\n\n arr3 = pa.StructArray.from_arrays([arr, arr], ['start', 'stop'])\n arr4 = pa.StructArray.from_arrays([arr2, arr2], ['start', 'stop'])\n\n result = arr3.to_pandas()\n assert isinstance(result[0]['start'], datetime)\n assert isinstance(result[0]['stop'], datetime)\n\n result = arr4.to_pandas()\n assert isinstance(result[0]['start'], datetime)\n assert isinstance(result[0]['stop'], datetime)\n\n # same conversion for table\n result = pa.table({'a': arr3}).to_pandas()\n assert isinstance(result['a'][0]['start'], datetime)\n assert isinstance(result['a'][0]['stop'], datetime)\n\n result = pa.table({'a': arr4}).to_pandas()\n assert isinstance(result['a'][0]['start'], datetime)\n assert isinstance(result['a'][0]['stop'], datetime)\n\n\n# ----------------------------------------------------------------------\n# DictionaryArray tests\n\n\ndef test_dictionary_with_pandas():\n src_indices = np.repeat([0, 1, 2], 2)\n dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)\n mask = np.array([False, False, True, False, False, False])\n\n for index_type in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32',\n 'uint64', 'int64']:\n indices = src_indices.astype(index_type)\n d1 = pa.DictionaryArray.from_arrays(indices, dictionary)\n d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)\n\n if index_type[0] == 'u':\n # TODO: unsigned dictionary indices to pandas\n with pytest.raises(TypeError):\n d1.to_pandas()\n continue\n\n pandas1 = d1.to_pandas()\n ex_pandas1 = pd.Categorical.from_codes(indices, categories=dictionary)\n\n 
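# Only signed index types reach this point; compare against the equivalent Categorical\n        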
tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1))\n\n pandas2 = d2.to_pandas()\n assert pandas2.isnull().sum() == 1\n\n # Unsigned integers converted to signed\n signed_indices = indices\n if index_type[0] == 'u':\n signed_indices = indices.astype(index_type[1:])\n ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1,\n signed_indices),\n categories=dictionary)\n\n tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2))\n\n\ndef random_strings(n, item_size, pct_null=0, dictionary=None):\n if dictionary is not None:\n result = dictionary[np.random.randint(0, len(dictionary), size=n)]\n else:\n result = np.array([random_ascii(item_size) for i in range(n)],\n dtype=object)\n\n if pct_null > 0:\n result[np.random.rand(n) < pct_null] = None\n\n return result\n\n\ndef test_variable_dictionary_to_pandas():\n np.random.seed(12345)\n\n d1 = pa.array(random_strings(100, 32), type='string')\n d2 = pa.array(random_strings(100, 16), type='string')\n d3 = pa.array(random_strings(10000, 10), type='string')\n\n a1 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d1), size=1000, dtype='i4'),\n d1\n )\n a2 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d2), size=1000, dtype='i4'),\n d2\n )\n\n # With some nulls\n a3 = pa.DictionaryArray.from_arrays(\n np.random.randint(0, len(d3), size=1000, dtype='i4'), d3)\n\n i4 = pa.array(\n np.random.randint(0, len(d3), size=1000, dtype='i4'),\n mask=np.random.rand(1000) < 0.1\n )\n a4 = pa.DictionaryArray.from_arrays(i4, d3)\n\n expected_dict = pa.concat_arrays([d1, d2, d3])\n\n a = pa.chunked_array([a1, a2, a3, a4])\n a_dense = pa.chunked_array([a1.cast('string'),\n a2.cast('string'),\n a3.cast('string'),\n a4.cast('string')])\n\n result = a.to_pandas()\n result_dense = a_dense.to_pandas()\n\n assert (result.cat.categories == expected_dict.to_pandas()).all()\n\n expected_dense = result.astype('str')\n expected_dense[result_dense.isnull()] = None\n tm.assert_series_equal(result_dense, expected_dense)\n\n\ndef test_dictionary_encoded_nested_to_pandas():\n # ARROW-6899\n child = pa.array(['a', 'a', 'a', 'b', 'b']).dictionary_encode()\n\n arr = pa.ListArray.from_arrays([0, 3, 5], child)\n\n result = arr.to_pandas()\n expected = pd.Series([np.array(['a', 'a', 'a'], dtype=object),\n np.array(['b', 'b'], dtype=object)])\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_dictionary_from_pandas():\n cat = pd.Categorical(['a', 'b', 'a'])\n expected_type = pa.dictionary(pa.int8(), pa.string())\n\n result = pa.array(cat)\n assert result.to_pylist() == ['a', 'b', 'a']\n assert result.type.equals(expected_type)\n\n # with missing values in categorical\n cat = pd.Categorical(['a', 'b', None, 'a'])\n\n result = pa.array(cat)\n assert result.to_pylist() == ['a', 'b', None, 'a']\n assert result.type.equals(expected_type)\n\n # with additional mask\n result = pa.array(cat, mask=np.array([False, False, False, True]))\n assert result.to_pylist() == ['a', 'b', None, None]\n assert result.type.equals(expected_type)\n\n\ndef test_dictionary_from_pandas_specified_type():\n # ARROW-7168 - ensure specified type is always respected\n\n # the same as cat = pd.Categorical(['a', 'b']) but explicit about dtypes\n cat = pd.Categorical.from_codes(\n np.array([0, 1], dtype='int8'), np.array(['a', 'b'], dtype=object))\n\n # different index type -> allow this\n # (the type of the 'codes' in pandas is not part of the data type)\n typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string())\n result = pa.array(cat, type=typ)\n assert 
result.type.equals(typ)\n assert result.to_pylist() == ['a', 'b']\n\n # mismatching values type -> raise error (for now a deprecation warning)\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64())\n with pytest.warns(FutureWarning):\n result = pa.array(cat, type=typ)\n assert result.to_pylist() == ['a', 'b']\n\n # mismatching order -> raise error (for now a deprecation warning)\n typ = pa.dictionary(\n index_type=pa.int8(), value_type=pa.string(), ordered=True)\n with pytest.warns(FutureWarning, match=\"The 'ordered' flag of the passed\"):\n result = pa.array(cat, type=typ)\n assert result.to_pylist() == ['a', 'b']\n\n # with mask\n typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string())\n result = pa.array(cat, type=typ, mask=np.array([False, True]))\n assert result.type.equals(typ)\n assert result.to_pylist() == ['a', None]\n\n # empty categorical -> be flexible in values type to allow\n cat = pd.Categorical([])\n\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.string())\n result = pa.array(cat, type=typ)\n assert result.type.equals(typ)\n assert result.to_pylist() == []\n typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64())\n result = pa.array(cat, type=typ)\n assert result.type.equals(typ)\n assert result.to_pylist() == []\n\n # passing non-dictionary type\n cat = pd.Categorical(['a', 'b'])\n result = pa.array(cat, type=pa.string())\n expected = pa.array(['a', 'b'], type=pa.string())\n assert result.equals(expected)\n assert result.to_pylist() == ['a', 'b']\n\n\n# ----------------------------------------------------------------------\n# Array protocol in pandas conversions tests\n\n\ndef test_array_protocol():\n if LooseVersion(pd.__version__) < '0.24.0':\n pytest.skip('IntegerArray only introduced in 0.24')\n\n df = pd.DataFrame({'a': pd.Series([1, 2, None], dtype='Int64')})\n\n if LooseVersion(pd.__version__) < '0.26.0.dev':\n # with pandas<=0.25, trying to convert nullable integer errors\n with pytest.raises(TypeError):\n pa.table(df)\n else:\n # __arrow_array__ added to pandas IntegerArray in 0.26.0.dev\n\n # default conversion\n result = pa.table(df)\n expected = pa.array([1, 2, None], pa.int64())\n assert result[0].chunk(0).equals(expected)\n\n # with specifying schema\n schema = pa.schema([('a', pa.float64())])\n result = pa.table(df, schema=schema)\n expected2 = pa.array([1, 2, None], pa.float64())\n assert result[0].chunk(0).equals(expected2)\n\n # pass Series to pa.array\n result = pa.array(df['a'])\n assert result.equals(expected)\n result = pa.array(df['a'], type=pa.float64())\n assert result.equals(expected2)\n\n # pass actual ExtensionArray to pa.array\n result = pa.array(df['a'].values)\n assert result.equals(expected)\n result = pa.array(df['a'].values, type=pa.float64())\n assert result.equals(expected2)\n\n\nclass DummyExtensionType(pa.PyExtensionType):\n\n def __init__(self):\n pa.PyExtensionType.__init__(self, pa.int64())\n\n def __reduce__(self):\n return DummyExtensionType, ()\n\n\ndef PandasArray__arrow_array__(self, type=None):\n # hardcode dummy return regardless of self - we only want to check that\n # this method is correctly called\n storage = pa.array([1, 2, 3], type=pa.int64())\n return pa.ExtensionArray.from_storage(DummyExtensionType(), storage)\n\n\ndef test_array_protocol_pandas_extension_types(monkeypatch):\n # ARROW-7022 - ensure protocol works for Period / Interval extension dtypes\n\n if LooseVersion(pd.__version__) < '0.24.0':\n pytest.skip('Period/IntervalArray only introduced in 0.24')\n\n storage = 
pa.array([1, 2, 3], type=pa.int64())\n expected = pa.ExtensionArray.from_storage(DummyExtensionType(), storage)\n\n monkeypatch.setattr(pd.arrays.PeriodArray, \"__arrow_array__\",\n PandasArray__arrow_array__, raising=False)\n monkeypatch.setattr(pd.arrays.IntervalArray, \"__arrow_array__\",\n PandasArray__arrow_array__, raising=False)\n for arr in [pd.period_range(\"2012-01-01\", periods=3, freq=\"D\").array,\n pd.interval_range(1, 4).array]:\n result = pa.array(arr)\n assert result.equals(expected)\n result = pa.array(pd.Series(arr))\n assert result.equals(expected)\n result = pa.array(pd.Index(arr))\n assert result.equals(expected)\n result = pa.table(pd.DataFrame({'a': arr})).column('a').chunk(0)\n assert result.equals(expected)\n\n\n# ----------------------------------------------------------------------\n# Pandas ExtensionArray support\n\n\ndef _Int64Dtype__from_arrow__(self, array):\n # for test only deal with single chunk for now\n # TODO: do we require handling of chunked arrays in the protocol?\n arr = array.chunk(0)\n buflist = arr.buffers()\n data = np.frombuffer(buflist[-1], dtype='int64')[\n arr.offset:arr.offset + len(arr)]\n bitmask = buflist[0]\n if bitmask is not None:\n mask = pa.BooleanArray.from_buffers(\n pa.bool_(), len(arr), [None, bitmask])\n mask = np.asarray(mask)\n else:\n mask = np.ones(len(arr), dtype=bool)\n int_arr = pd.arrays.IntegerArray(data.copy(), ~mask, copy=False)\n return int_arr\n\n\ndef test_convert_to_extension_array(monkeypatch):\n if LooseVersion(pd.__version__) < \"0.26.0.dev\":\n pytest.skip(\"Conversion from IntegerArray to arrow not yet supported\")\n\n import pandas.core.internals as _int\n\n # table converted from dataframe with extension types (so pandas_metadata\n # has this information)\n df = pd.DataFrame(\n {'a': [1, 2, 3], 'b': pd.array([2, 3, 4], dtype='Int64'),\n 'c': [4, 5, 6]})\n table = pa.table(df)\n\n # Int64Dtype is recognized -> convert to extension block by default\n # for a proper roundtrip\n result = table.to_pandas()\n assert isinstance(result._data.blocks[0], _int.IntBlock)\n assert isinstance(result._data.blocks[1], _int.ExtensionBlock)\n tm.assert_frame_equal(result, df)\n\n # test with missing values\n df2 = pd.DataFrame({'a': pd.array([1, 2, None], dtype='Int64')})\n table2 = pa.table(df2)\n result = table2.to_pandas()\n assert isinstance(result._data.blocks[0], _int.ExtensionBlock)\n tm.assert_frame_equal(result, df2)\n\n # monkeypatch pandas Int64Dtype to *not* have the protocol method\n monkeypatch.delattr(pd.core.arrays.integer._IntegerDtype, \"__from_arrow__\")\n # Int64Dtype has no __from_arrow__ -> use normal conversion\n result = table.to_pandas()\n assert len(result._data.blocks) == 1\n assert isinstance(result._data.blocks[0], _int.IntBlock)\n\n\nclass MyCustomIntegerType(pa.PyExtensionType):\n\n def __init__(self):\n pa.PyExtensionType.__init__(self, pa.int64())\n\n def __reduce__(self):\n return MyCustomIntegerType, ()\n\n def to_pandas_dtype(self):\n return pd.Int64Dtype()\n\n\ndef test_conversion_extensiontype_to_extensionarray(monkeypatch):\n # converting extension type to linked pandas ExtensionDtype/Array\n import pandas.core.internals as _int\n\n if LooseVersion(pd.__version__) < \"0.24.0\":\n pytest.skip(\"ExtensionDtype introduced in pandas 0.24\")\n\n storage = pa.array([1, 2, 3, 4], pa.int64())\n arr = pa.ExtensionArray.from_storage(MyCustomIntegerType(), storage)\n table = pa.table({'a': arr})\n\n if LooseVersion(pd.__version__) < \"0.26.0.dev\":\n # ensure pandas Int64Dtype has the protocol 
method (for older pandas)\n monkeypatch.setattr(\n pd.Int64Dtype, '__from_arrow__', _Int64Dtype__from_arrow__,\n raising=False)\n\n # extension type points to Int64Dtype, which knows how to create a\n # pandas ExtensionArray\n result = table.to_pandas()\n assert isinstance(result._data.blocks[0], _int.ExtensionBlock)\n expected = pd.DataFrame({'a': pd.array([1, 2, 3, 4], dtype='Int64')})\n tm.assert_frame_equal(result, expected)\n\n # monkeypatch pandas Int64Dtype to *not* have the protocol method\n # (remove the version added above and the actual version for recent pandas)\n if LooseVersion(pd.__version__) < \"0.26.0.dev\":\n monkeypatch.delattr(pd.Int64Dtype, \"__from_arrow__\")\n else:\n monkeypatch.delattr(\n pd.core.arrays.integer._IntegerDtype, \"__from_arrow__\",\n raising=False)\n\n with pytest.raises(ValueError):\n table.to_pandas()\n\n\ndef test_to_pandas_extension_dtypes_mapping():\n if LooseVersion(pd.__version__) < \"0.26.0.dev\":\n pytest.skip(\"Conversion to pandas IntegerArray not yet supported\")\n\n table = pa.table({'a': pa.array([1, 2, 3], pa.int64())})\n\n # default use numpy dtype\n result = table.to_pandas()\n assert result['a'].dtype == np.dtype('int64')\n\n # specify to override the default\n result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get)\n assert isinstance(result['a'].dtype, pd.Int64Dtype)\n\n # types that return None in function get normal conversion\n table = pa.table({'a': pa.array([1, 2, 3], pa.int32())})\n result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get)\n assert result['a'].dtype == np.dtype('int32')\n\n # `types_mapper` overrules the pandas metadata\n table = pa.table(pd.DataFrame({'a': pd.array([1, 2, 3], dtype=\"Int64\")}))\n result = table.to_pandas()\n assert isinstance(result['a'].dtype, pd.Int64Dtype)\n result = table.to_pandas(\n types_mapper={pa.int64(): pd.PeriodDtype('D')}.get)\n assert isinstance(result['a'].dtype, pd.PeriodDtype)\n\n\n# ----------------------------------------------------------------------\n# Legacy metadata compatibility tests\n\n\ndef test_metadata_compat_range_index_pre_0_12():\n # Forward compatibility for metadata created from pandas.RangeIndex\n # prior to pyarrow 0.13.0\n a_values = ['foo', 'bar', None, 'baz']\n b_values = ['a', 'a', 'b', 'b']\n a_arrow = pa.array(a_values, type='utf8')\n b_arrow = pa.array(b_values, type='utf8')\n\n rng_index_arrow = pa.array([0, 2, 4, 6], type='int64')\n\n gen_name_0 = '__index_level_0__'\n gen_name_1 = '__index_level_1__'\n\n # Case 1: named RangeIndex\n e1 = pd.DataFrame({\n 'a': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n t1 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['a', 'qux'])\n t1 = t1.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': ['qux'],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': 'qux',\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r1 = t1.to_pandas()\n tm.assert_frame_equal(r1, e1)\n\n # Case 2: named RangeIndex, but conflicts with an actual column\n e2 = pd.DataFrame({\n 'qux': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n t2 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['qux', gen_name_0])\n t2 = 
t2.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r2 = t2.to_pandas()\n tm.assert_frame_equal(r2, e2)\n\n # Case 3: unnamed RangeIndex\n e3 = pd.DataFrame({\n 'a': a_values\n }, index=pd.RangeIndex(0, 8, step=2, name=None))\n t3 = pa.Table.from_arrays([a_arrow, rng_index_arrow],\n names=['a', gen_name_0])\n t3 = t3.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r3 = t3.to_pandas()\n tm.assert_frame_equal(r3, e3)\n\n # Case 4: MultiIndex with named RangeIndex\n e4 = pd.DataFrame({\n 'a': a_values\n }, index=[pd.RangeIndex(0, 8, step=2, name='qux'), b_values])\n t4 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],\n names=['a', 'qux', gen_name_1])\n t4 = t4.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': ['qux', gen_name_1],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': 'qux',\n 'field_name': 'qux',\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_1,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r4 = t4.to_pandas()\n tm.assert_frame_equal(r4, e4)\n\n # Case 4: MultiIndex with unnamed RangeIndex\n e5 = pd.DataFrame({\n 'a': a_values\n }, index=[pd.RangeIndex(0, 8, step=2, name=None), b_values])\n t5 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],\n names=['a', gen_name_0, gen_name_1])\n t5 = t5.replace_schema_metadata({\n b'pandas': json.dumps(\n {'index_columns': [gen_name_0, gen_name_1],\n 'column_indexes': [{'name': None,\n 'field_name': None,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': {'encoding': 'UTF-8'}}],\n 'columns': [{'name': 'a',\n 'field_name': 'a',\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_0,\n 'pandas_type': 'int64',\n 'numpy_type': 'int64',\n 'metadata': None},\n {'name': None,\n 'field_name': gen_name_1,\n 'pandas_type': 'unicode',\n 'numpy_type': 'object',\n 'metadata': None}],\n 'pandas_version': '0.23.4'}\n )})\n r5 = t5.to_pandas()\n tm.assert_frame_equal(r5, e5)\n\n\ndef test_metadata_compat_missing_field_name():\n # Combination of missing field name but with index column as metadata.\n # This combo occurs in the latest versions of fastparquet (0.3.2), but not\n # in pyarrow itself (since field_name was added in 0.8, index as metadata\n # only 
added later)\n\n a_values = [1, 2, 3, 4]\n b_values = ['a', 'b', 'c', 'd']\n a_arrow = pa.array(a_values, type='int64')\n b_arrow = pa.array(b_values, type='utf8')\n\n expected = pd.DataFrame({\n 'a': a_values,\n 'b': b_values,\n }, index=pd.RangeIndex(0, 8, step=2, name='qux'))\n table = pa.table({'a': a_arrow, 'b': b_arrow})\n\n # metadata generated by fastparquet 0.3.2 with missing field_names\n table = table.replace_schema_metadata({\n b'pandas': json.dumps({\n 'column_indexes': [\n {'field_name': None,\n 'metadata': None,\n 'name': None,\n 'numpy_type': 'object',\n 'pandas_type': 'mixed-integer'}\n ],\n 'columns': [\n {'metadata': None,\n 'name': 'a',\n 'numpy_type': 'int64',\n 'pandas_type': 'int64'},\n {'metadata': None,\n 'name': 'b',\n 'numpy_type': 'object',\n 'pandas_type': 'unicode'}\n ],\n 'index_columns': [\n {'kind': 'range',\n 'name': 'qux',\n 'start': 0,\n 'step': 2,\n 'stop': 8}\n ],\n 'pandas_version': '0.25.0'}\n\n )})\n result = table.to_pandas()\n # on python 3.5 the column order can differ -> adding check_like=True\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\ndef make_df_with_timestamps():\n # Some of the milliseconds timestamps deliberately don't fit in the range\n # that is possible with nanosecond timestamps.\n df = pd.DataFrame({\n 'dateTimeMs': [\n np.datetime64('0001-01-01 00:00', 'ms'),\n np.datetime64('2012-05-02 12:35', 'ms'),\n np.datetime64('2012-05-03 15:42', 'ms'),\n np.datetime64('3000-05-03 15:42', 'ms'),\n ],\n 'dateTimeNs': [\n np.datetime64('1991-01-01 00:00', 'ns'),\n np.datetime64('2012-05-02 12:35', 'ns'),\n np.datetime64('2012-05-03 15:42', 'ns'),\n np.datetime64('2050-05-03 15:42', 'ns'),\n ],\n })\n # Not part of what we're testing, just ensuring that the inputs are what we\n # expect.\n assert (df.dateTimeMs.dtype, df.dateTimeNs.dtype) == (\n # O == object, <M8[ns] == timestamp64[ns]\n np.dtype(\"O\"), np.dtype(\"<M8[ns]\")\n )\n return df\n\n\[email protected]\ndef test_timestamp_as_object_parquet(tempdir):\n # Timestamps can be stored as Parquet and reloaded into Pandas with no loss\n # of information if the timestamp_as_object option is True.\n df = make_df_with_timestamps()\n table = pa.Table.from_pandas(df)\n filename = tempdir / \"timestamps_from_pandas.parquet\"\n pq.write_table(table, filename, version=\"2.0\")\n result = pq.read_table(filename)\n df2 = result.to_pandas(timestamp_as_object=True)\n tm.assert_frame_equal(df, df2)\n\n\ndef test_timestamp_as_object_out_of_range():\n # Out of range timestamps can be converted Arrow and reloaded into Pandas\n # with no loss of information if the timestamp_as_object option is True.\n df = make_df_with_timestamps()\n table = pa.Table.from_pandas(df)\n df2 = table.to_pandas(timestamp_as_object=True)\n tm.assert_frame_equal(df, df2)\n\n\[email protected](\"resolution\", [\"s\", \"ms\", \"us\"])\n# One datetime outside nanosecond range, one inside nanosecond range:\[email protected](\"dt\", [datetime(1553, 1, 1), datetime(2020, 1, 1)])\ndef test_timestamp_as_object_non_nanosecond(resolution, dt):\n # Timestamps can be converted Arrow and reloaded into Pandas with no loss\n # of information if the timestamp_as_object option is True.\n arr = pa.array([dt], type=pa.timestamp(resolution))\n result = arr.to_pandas(timestamp_as_object=True)\n assert result.dtype == object\n assert isinstance(result[0], datetime)\n assert result[0] == dt\n\n table = pa.table({'a': arr})\n result = table.to_pandas(timestamp_as_object=True)['a']\n assert result.dtype == object\n assert 
isinstance(result[0], datetime)\n assert result[0] == dt\n" ]
[ [ "pandas.to_datetime", "pandas.testing.assert_series_equal", "pandas.Series", "numpy.linspace", "numpy.asarray", "pandas.RangeIndex", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "numpy.random.random_sample", "pandas.testing.assert_frame_equal", "numpy.random.randn", "numpy.iinfo", "numpy.where", "numpy.bool_", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.arange", "pandas.Index", "pandas.DatetimeIndex", "numpy.frombuffer", "pandas.PeriodDtype", "numpy.repeat", "numpy.zeros", "pandas.to_numeric", "pandas.Categorical.from_codes", "pandas.concat", "pandas.interval_range", "numpy.isnan", "pandas.Categorical", "pandas.array", "numpy.int64", "numpy.random.rand", "pandas.date_range", "pandas.DataFrame.from_dict", "numpy.array", "pandas.SparseArray", "pandas.isnull", "numpy.random.seed", "pandas.period_range", "numpy.int32", "pandas.MultiIndex.from_arrays", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.datetime64", "pandas.Timestamp.now", "numpy.float64", "numpy.ma.masked_array", "pandas.Int64Dtype", "pandas.Timestamp", "numpy.empty" ] ]
tomscolaro/l5kit
[ "92c88c4050b946b6828a47ddb3d13da9d87f9f7d" ]
[ "l5kit/l5kit/geometry/transform.py" ]
[ "from typing import Optional, Sequence, Tuple, Union, cast\n\nimport numpy as np\nimport pymap3d as pm\nimport transforms3d\n\n\ndef rotation33_as_yaw(rotation: np.ndarray) -> float:\n \"\"\"Compute the yaw component of given 3x3 rotation matrix.\n\n Args:\n rotation (np.ndarray): 3x3 rotation matrix (np.float64 dtype recommended)\n\n Returns:\n float: yaw rotation in radians\n \"\"\"\n return cast(float, transforms3d.euler.mat2euler(rotation)[2])\n\n\ndef yaw_as_rotation33(yaw: float) -> np.ndarray:\n \"\"\"Create a 3x3 rotation matrix from given yaw.\n The rotation is counter-clockwise and it is equivalent to:\n [cos(yaw), -sin(yaw), 0.0],\n [sin(yaw), cos(yaw), 0.0],\n [0.0, 0.0, 1.0],\n\n Args:\n yaw (float): yaw rotation in radians\n\n Returns:\n np.ndarray: 3x3 rotation matrix\n \"\"\"\n return transforms3d.euler.euler2mat(0, 0, yaw)\n\n\ndef world_to_image_pixels_matrix(\n image_shape: Tuple[int, int],\n pixel_size_m: np.ndarray,\n ego_translation_m: np.ndarray,\n ego_yaw_rad: Optional[float] = None,\n ego_center_in_image_ratio: Optional[np.ndarray] = None,\n) -> np.ndarray:\n \"\"\"\n Constructs a transformation matrix from world coordinates to image pixels.\n Note: we're ignoring Z coord, with the assumption that it won't change dramatically.\n This means last row of the matrix will be [0,0,1] and we will transform 2D points in fact (X,Y)\n\n Args:\n image_shape (Tuple[int, int]): the size of the image\n pixel_size_m (np.ndarray): how many meters a pixel cover in the two directions\n ego_translation_m (np.ndarray): translation of the ego in meters in world-coordinates\n ego_yaw_rad (Optional[float]):if defined, rotation is applied so that ego will always face to the right\n in the resulting image coordinates\n ego_center_in_image_ratio (Optional[np.ndarray]): enables to position the ego in different places\n in the resulting image. The [0.5, 0.5] value puts it in the center\n\n Returns:\n np.ndarray: 3x3 transformation matrix\n \"\"\"\n\n # Translate world to ego by applying the negative ego translation.\n world_to_ego_in_2d = np.eye(3, dtype=np.float32)\n world_to_ego_in_2d[0:2, 2] = -ego_translation_m[0:2]\n\n if ego_yaw_rad is not None:\n # Rotate counter-clockwise by negative yaw to align world such that ego faces right.\n world_to_ego_in_2d = yaw_as_rotation33(-ego_yaw_rad) @ world_to_ego_in_2d\n\n # Scale the meters to pixels.\n world_to_image_scale = np.eye(3)\n world_to_image_scale[0, 0] = 1.0 / pixel_size_m[0]\n world_to_image_scale[1, 1] = 1.0 / pixel_size_m[1]\n\n # Move so that it is aligned to the defined image center.\n if ego_center_in_image_ratio is None:\n ego_center_in_image_ratio = np.array([0.5, 0.5])\n ego_center_in_pixels = ego_center_in_image_ratio * image_shape\n image_to_ego_center = np.eye(3)\n image_to_ego_center[0:2, 2] = ego_center_in_pixels\n\n # Construct the whole transform and return it.\n return image_to_ego_center @ world_to_image_scale @ world_to_ego_in_2d\n\n\ndef flip_y_axis(tm: np.ndarray, y_dim_size: int) -> np.ndarray:\n \"\"\"\n Return a new matrix that also performs a flip on the y axis.\n\n Args:\n tm: the original 3x3 matrix\n y_dim_size: this should match the resolution on y. 
It makes all coordinates positive\n\n Returns: a new 3x3 matrix.\n\n \"\"\"\n flip_y = np.eye(3)\n flip_y[1, 1] = -1\n tm = np.matmul(flip_y, tm)\n tm[1, 2] += y_dim_size\n return tm\n\n\ndef transform_points(points: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform points using transformation matrix.\n\n Args:\n points (np.ndarray): Input points (Nx2), (Nx3) or (Nx4).\n transf_matrix (np.ndarray): 3x3 or 4x4 transformation matrix for 2D and 3D input respectively\n\n Returns:\n np.ndarray: array of shape (N,2) for 2D input points, or (N,3) points for 3D input points\n \"\"\"\n # TODO: Surely we can do this without transposing.\n return transform_points_transposed(points.transpose(1, 0), transf_matrix).transpose(1, 0)\n\n\ndef transform_points_transposed(points: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform points using transformation matrix.\n\n Args:\n points (np.ndarray): Input points (2xN), (3xN) or (4xN).\n transf_matrix (np.ndarray): 3x3 or 4x4 transformation matrix for 2D and 3D input respectively\n\n Returns:\n np.ndarray: array of shape (2,N) for 2D input points, or (3,N) points for 3D input points\n \"\"\"\n num_dims = transf_matrix.shape[0] - 1\n if points.shape[0] not in [2, 3, 4]:\n raise ValueError(\"Points input should be (2, N), (3,N) or (4,N) shape, received {}\".format(points.shape))\n\n return transf_matrix.dot(np.vstack((points[:num_dims, :], np.ones(points.shape[1]))))[:num_dims, :]\n\n\ndef transform_point(point: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n \"\"\" Transform a single vector using transformation matrix.\n\n Args:\n point (np.ndarray): vector of shape (N)\n transf_matrix (np.ndarray): transformation matrix of shape (N+1, N+1)\n\n Returns:\n np.ndarray: vector of same shape as input point\n \"\"\"\n point_ext = np.hstack((point, np.ones(1)))\n return np.matmul(transf_matrix, point_ext)[: point.shape[0]]\n\n\ndef get_transformation_matrix(translation: np.ndarray, rotation: np.ndarray) -> np.ndarray:\n \"\"\"\n Get a 3D transformation matrix from translation vector and quaternion rotation\n\n Args:\n translation (np.ndarray): 3D translation vector\n rotation (np.ndarray): 4 quaternion values\n\n Returns:\n np.ndarray: 4x4 transformation matrix\n \"\"\"\n rot_mat = transforms3d.quaternions.quat2mat(rotation)\n tr = transforms3d.affines.compose(translation, rot_mat, np.ones(3))\n return tr\n\n\ndef ecef_to_geodetic(point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:\n \"\"\"Convert given ECEF coordinate into latitude, longitude, altitude.\n\n Args:\n point (Union[np.ndarray, Sequence[float]]): ECEF coordinate vector\n\n Returns:\n np.ndarray: latitude, altitude, longitude\n \"\"\"\n return np.array(pm.ecef2geodetic(point[0], point[1], point[2]))\n\n\ndef geodetic_to_ecef(lla_point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:\n \"\"\"Convert given latitude, longitude, and optionally altitude into ECEF\n coordinates. If no altitude is given, altitude 0 is assumed.\n\n Args:\n lla_point (Union[np.ndarray, Sequence[float]]): Latitude, Longitude and optionally Altitude\n\n Returns:\n np.ndarray: 3D ECEF coordinate\n \"\"\"\n if len(lla_point) == 2:\n return np.array(pm.geodetic2ecef(lla_point[0], lla_point[1], 0), dtype=np.float64)\n else:\n return np.array(pm.geodetic2ecef(lla_point[0], lla_point[1], lla_point[2]), dtype=np.float64)\n" ]
[ [ "numpy.eye", "numpy.array", "numpy.matmul", "numpy.ones" ] ]
andrewrech/CellSeg
[ "2235a0e15ccc106cc0781d377f95c9e7db22ea9f" ]
[ "src/cvmask.py" ]
[ "# cvmask.py\n# ---------------------------\n# Wrapper class for masks. See class doc for details.\n\nimport numpy as np\nfrom scipy.linalg import lstsq\nfrom scipy.spatial import distance\nfrom operator import itemgetter\nfrom skimage.measure import find_contours\nfrom skimage.morphology import disk, dilation\nfrom scipy.ndimage.morphology import binary_dilation\nimport pandas as pd\nimport sys\nfrom sklearn.neighbors import kneighbors_graph\nfrom scipy.spatial.distance import cdist\n\nIMAGEJ_BAND_WIDTH = 200\nEIGHT_BIT_MAX = 255\n\nclass CVMask():\n '''\n Provides a class that wraps around a numpy array representing masks out of the CellVision model.\n The class provides functions to grow, remove overlaps (nearest neighbors), and export to various\n formats. All methods that change the masks modify the masks stored in the .masks property\n '''\n def __init__(self, flatmasks):\n self.masks = None\n self.flatmasks = flatmasks\n self.centroids = None\n\n def n_instances(self):\n return len(np.unique(self.flatmasks)) - 1\n\n def update_adjacency_value(self, adjacency_matrix, original, neighbor):\n border = False\n\n if original != 0 and original != neighbor:\n border = True\n if neighbor != 0:\n adjacency_matrix[int(original - 1), int(neighbor - 1)] += 1\n return border\n\n def update_adjacency_matrix(self, plane_mask_flattened, width, height, adjacency_matrix, index):\n mod_value_width = index % width\n origin_mask = plane_mask_flattened[index]\n left, right, up, down = False, False, False, False\n\n if (mod_value_width != 0):\n left = self.update_adjacency_value(adjacency_matrix, origin_mask, plane_mask_flattened[index-1])\n if (mod_value_width != width - 1):\n right = self.update_adjacency_value(adjacency_matrix, origin_mask, plane_mask_flattened[index+1])\n if (index >= width):\n up = self.update_adjacency_value(adjacency_matrix, origin_mask, plane_mask_flattened[index-width])\n if (index <= len(plane_mask_flattened) - 1 - width):\n down = self.update_adjacency_value(adjacency_matrix, origin_mask, plane_mask_flattened[index+width])\n \n if (left or right or up or down):\n adjacency_matrix[int(origin_mask - 1), int(origin_mask-1)] += 1\n\n def compute_channel_means_sums_compensated(self, image):\n height, width, n_channels = image.shape\n mask_height, mask_width = self.flatmasks.shape\n n_masks = len(np.unique(self.flatmasks)) - 1\n channel_sums = np.zeros((n_masks, n_channels))\n channel_counts = np.zeros((n_masks, n_channels))\n if n_masks == 0:\n return channel_sums, channel_sums, channel_counts\n\n squashed_image = np.reshape(image, (height*width, n_channels))\n \n #masklocs = np.nonzero(self.flatmasks)\n #plane_mask = np.zeros((mask_height, mask_width), dtype = np.uint32)\n #plane_mask[masklocs[0], masklocs[1]] = masklocs[2] + 1\n #plane_mask = plane_mask.flatten()\n plane_mask = self.flatmasks.flatten()\n \n adjacency_matrix = np.zeros((n_masks, n_masks))\n for i in range(len(plane_mask)):\n self.update_adjacency_matrix(plane_mask, mask_width, mask_height, adjacency_matrix, i)\n \n mask_val = plane_mask[i] - 1\n if mask_val != -1:\n channel_sums[mask_val.astype(np.int32)] += squashed_image[i]\n channel_counts[mask_val.astype(np.int32)] += 1\n \n \n # Normalize adjacency matrix\n for i in range(n_masks):\n adjacency_matrix[i] = adjacency_matrix[i] / (max(adjacency_matrix[i, i], 1) * 2)\n adjacency_matrix[i, i] = 1\n \n means = np.true_divide(channel_sums, channel_counts, out=np.zeros_like(channel_sums, dtype='float'), where=channel_counts!=0)\n results = lstsq(adjacency_matrix, 
means, overwrite_a=True, overwrite_b=False)\n compensated_means = np.maximum(results[0], np.zeros((1,1))) \n\n return compensated_means, means, channel_counts[:,0]\n\n def compute_centroids(self):\n masks = self.flatmasks\n num_masks = len(np.unique(masks)) - 1\n indices = np.where(masks != 0)\n values = masks[indices[0], indices[1]]\n\n maskframe = pd.DataFrame(np.transpose(np.array([indices[0], indices[1], values]))).rename(columns = {0:\"x\", 1:\"y\", 2:\"id\"})\n centroids = maskframe.groupby('id').agg({'x': 'mean', 'y': 'mean'}).to_records(index = False).tolist()\n \n self.centroids = centroids\n \n \n def compute_boundbox(self):\n masks = self.flatmasks\n num_masks = len(np.unique(masks)) - 1\n indices = np.where(masks != 0)\n values = masks[indices[0], indices[1]]\n\n maskframe = pd.DataFrame(np.transpose(np.array([indices[0], indices[1], values]))).rename(columns = {0:\"y\", 1:\"x\", 2:\"id\"})\n self.bb_mins = maskframe.groupby('id').agg({'y': 'min', 'x': 'min'}).to_records(index = False).tolist()\n self.bb_maxes = maskframe.groupby('id').agg({'y': 'max', 'x': 'max'}).to_records(index = False).tolist()\n \n def absolute_centroids(self, tile_row, tile_col):\n y_offset = self.flatmasks.shape[0] * (tile_row - 1)\n x_offset = self.flatmasks.shape[1] * (tile_col - 1)\n\n offsets = [y_offset, x_offset]\n\n centroids = self.centroids\n if not centroids:\n return centroids\n \n absolutes = [(cent[0] + offsets[0], cent[1] + offsets[1]) for cent in centroids]\n \n absolutes = np.array(absolutes)\n\n return absolutes\n \n def applyXYoffset(masks,offset_vector):\n\n for i in range(masks.shape[2]):\n masks[0,:,i] += offset_vector[0]\n masks[1,:,i] += offset_vector[1]\n return masks\n \n def remove_overlaps_nearest_neighbors(self, masks):\n final_masks = np.max(masks, axis = 2)\n centroids = self.centroids\n collisions = np.nonzero(np.sum(masks > 0, axis = 2) > 1)\n collision_masks = masks[collisions]\n collision_index = np.nonzero(collision_masks)\n collision_masks = collision_masks[collision_index]\n collision_frame = pd.DataFrame(np.transpose(np.array([collision_index[0], collision_masks]))).rename(columns = {0:\"collis_idx\", 1:\"mask_id\"})\n grouped_frame = collision_frame.groupby('collis_idx')\n for collis_idx, group in grouped_frame:\n collis_pos = np.expand_dims(np.array([collisions[0][collis_idx], collisions[1][collis_idx]]), axis = 0)\n prevval = final_masks[collis_pos[0,0], collis_pos[0,1]]\n mask_ids = list(group['mask_id'])\n curr_centroids = np.array([centroids[mask_id - 1] for mask_id in mask_ids])\n dists = cdist(curr_centroids, collis_pos)\n closest_mask = mask_ids[np.argmin(dists)]\n final_masks[collis_pos[0,0], collis_pos[0,1]] = closest_mask\n \n return final_masks\n \n def grow_masks(self, growth, method = 'Standard', num_neighbors = 30):\n assert method in ['Standard', 'Sequential']\n \n masks = self.flatmasks\n num_masks = len(np.unique(masks)) - 1\n \n if method == 'Standard':\n print(\"Standard growth selected\")\n masks = self.flatmasks\n num_masks = len(np.unique(masks)) - 1\n indices = np.where(masks != 0)\n values = masks[indices[0], indices[1]]\n\n maskframe = pd.DataFrame(np.transpose(np.array([indices[0], indices[1], values]))).rename(columns = {0:\"x\", 1:\"y\", 2:\"id\"})\n cent_array = maskframe.groupby('id').agg({'x': 'mean', 'y': 'mean'}).to_numpy()\n connectivity_matrix = kneighbors_graph(cent_array, num_neighbors).toarray() * np.arange(1, num_masks + 1)\n connectivity_matrix = connectivity_matrix.astype(int)\n labels = {}\n for n in range(num_masks):\n 
connections = list(connectivity_matrix[n, :])\n connections.remove(0)\n layers_used = [labels[i] for i in connections if i in labels]\n layers_used.sort()\n currlayer = 0\n for layer in layers_used:\n if currlayer != layer: \n break\n currlayer += 1\n labels[n + 1] = currlayer\n\n possible_layers = len(list(set(labels.values())))\n label_frame = pd.DataFrame(list(labels.items()), columns = [\"maskid\", \"layer\"])\n image_h, image_w = masks.shape\n expanded_masks = np.zeros((image_h, image_w, possible_layers), dtype = np.uint32)\n\n grouped_frame = label_frame.groupby('layer')\n for layer, group in grouped_frame:\n currids = list(group['maskid'])\n masklocs = np.isin(masks, currids)\n expanded_masks[masklocs, layer] = masks[masklocs]\n\n dilation_mask = disk(1)\n grown_masks = np.copy(expanded_masks)\n for _ in range(growth):\n for i in range(possible_layers):\n grown_masks[:, :, i] = dilation(grown_masks[:, :, i], dilation_mask)\n self.flatmasks = self.remove_overlaps_nearest_neighbors(grown_masks)\n \n elif method == 'Sequential':\n print(\"Sequential growth selected\")\n Y, X = masks.shape\n struc = disk(1)\n for _ in range(growth):\n for i in range(num_masks):\n mins = self.bb_mins[i]\n maxes = self.bb_maxes[i]\n minY, minX, maxY, maxX = mins[0] - 3*growth, mins[1] - 3*growth, maxes[0] + 3*growth, maxes[1] + 3*growth\n if minX < 0: minX = 0\n if minY < 0: minY = 0\n if maxX >= X: maxX = X - 1\n if maxY >= Y: maxY = Y - 1\n\n currreg = masks[minY:maxY, minX:maxX]\n mask_snippet = (currreg == i + 1)\n full_snippet = currreg > 0\n other_masks_snippet = full_snippet ^ mask_snippet\n dilated_mask = binary_dilation(mask_snippet, struc)\n final_update = (dilated_mask ^ full_snippet) ^ other_masks_snippet\n\n #f, axarr = plt.subplots(1, 5)\n #plt.imshow(mask_snippet)\n #axarr[0].imshow(mask_snippet)\n #axarr[1].imshow(full_snippet)\n #axarr[2].imshow(other_masks_snippet)\n #axarr[3].imshow(dilated_mask)\n #axarr[4].imshow(final_update)\n #plt.show()\n\n pix_to_update = np.nonzero(final_update)\n\n pix_X = np.array([min(j + minX, X) for j in pix_to_update[1]])\n pix_Y = np.array([min(j + minY, Y) for j in pix_to_update[0]])\n\n masks[pix_Y, pix_X] = i + 1\n\n self.flatmasks = masks\n\n def sort_into_strips(self):\n N = self.n_instances()\n unsorted = []\n \n for n in range(N):\n mask_coords = np.where(self.masks[:,:,n])\n if (len(mask_coords[0]) > 0):\n y = mask_coords[0][0]\n x = mask_coords[1][0] // IMAGEJ_BAND_WIDTH\n unsorted.append((x, y, n))\n\n sorted_masks = sorted(unsorted, key=itemgetter(0,1))\n self.masks = self.masks[:, :, [x[2] for x in sorted_masks]]\n\n def output_to_file(self, file_path):\n N = self.n_instances()\n vertex_array = []\n\n for i in range(N):\n # Mask\n mask = self.masks[:, :, i]\n\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n vertex_array.append(verts)\n\n X, Y = [], []\n for i in range(len(vertex_array)):\n x, y = zip(*(vertex_array[i]))\n X.append(x)\n Y.append(y)\n \n # Needs to be awkwardly written into file because Fiji doesn't have extensions like numpy or pickle\n with open(file_path, \"w\") as f:\n for i in range(len(X)):\n line = \"\"\n for j in range(len(X[i])):\n line += str(X[i][j]) + \" \"\n line = line.strip() + \",\"\n for k in range(len(Y[i])):\n line += 
str(Y[i][k]) + \" \"\n line = line.strip() + \"\\n\"\n f.write(line)\n" ]
[ [ "scipy.ndimage.morphology.binary_dilation", "numpy.sum", "numpy.nonzero", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.fliplr", "sklearn.neighbors.kneighbors_graph", "scipy.spatial.distance.cdist", "scipy.linalg.lstsq", "numpy.max", "numpy.copy", "numpy.zeros_like", "numpy.argmin", "numpy.array", "numpy.zeros", "numpy.where", "numpy.isin" ] ]
rohm1/CatalogScanner
[ "19b9846e0a0ecb94342f7fafdcc2d32fbc007335" ]
[ "critters.py" ]
[ "import itertools\nfrom common import ScanMode, ScanResult\n\nimport collections\nimport cv2\nimport enum\nimport functools\nimport json\nimport numpy\nimport os\n\nfrom typing import Dict, Iterator, List, Tuple\n\n# The expected color for the video background.\nBG_COLOR = numpy.array([207, 238, 240])\n\n\nclass CritterType(enum.Enum):\n INSECTS = 1\n FISH = 2\n SEA_CREATURES = 3\n\n @classmethod\n def from_str(cls, value: str) -> 'CritterType':\n key = value.upper().replace(' ', '_')\n return cls.__members__[key]\n\n\nclass CritterImage:\n \"\"\"The image and data associated with a critter icon.\"\"\"\n\n def __init__(self, critter_name: str, critter_type: CritterType, icon_name: str):\n img_path = os.path.join('critters', 'generated', icon_name)\n self.img = cv2.imread(img_path)\n self.critter_name = critter_name\n self.critter_type = critter_type\n self.icon_name = icon_name\n\n def __repr__(self):\n return f'CritterIcon({self.critter_name!r}, {self.critter_type!r}, {self.icon_name!r})'\n\n\nclass CritterIcon(numpy.ndarray):\n \"\"\"Dummy ndarray subclass to hold critter type info.\"\"\"\n critter_type: CritterType\n\n\ndef detect(frame: numpy.ndarray) -> bool:\n \"\"\"Detects if a given frame is showing Critterpedia.\"\"\"\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5\n\n\ndef scan(video_file: str, locale: str = 'en-us') -> ScanResult:\n \"\"\"Scans a video of scrolling through Critterpedia and returns all critters found.\"\"\"\n critter_icons = parse_video(video_file)\n critter_names = match_critters(critter_icons)\n results = translate_names(critter_names, locale)\n\n return ScanResult(\n mode=ScanMode.CRITTERS,\n items=results,\n locale=locale.replace('auto', 'en-us'),\n )\n\n\ndef parse_video(filename: str) -> List[CritterImage]:\n \"\"\"Parses a whole video and returns icons for all critters found.\"\"\"\n all_icons: List[CritterImage] = []\n section_count: Dict[CritterType, int] = collections.defaultdict(int)\n for critter_type, frame in _read_frames(filename):\n section_count[critter_type] += 1\n for new_icon in _parse_frame(frame):\n critter_icon = new_icon.view(CritterIcon)\n critter_icon.critter_type = critter_type\n all_icons.append(critter_icon)\n\n assert section_count[CritterType.INSECTS] != 1, \\\n 'Incomplete critter scan for INSECTS section.'\n assert section_count[CritterType.FISH] != 1, \\\n 'Incomplete critter scan for FISH section.'\n\n return _remove_blanks(all_icons)\n\n\ndef match_critters(critter_icons: List[CritterImage]) -> List[str]:\n \"\"\"Matches icons against database of critter images, finding best matches.\"\"\"\n matched_critters = set()\n critter_db = _get_critter_db()\n for icon in critter_icons:\n best_match = _find_best_match(icon, critter_db[icon.critter_type])\n matched_critters.add(best_match.critter_name)\n return sorted(matched_critters)\n\n\ndef translate_names(critter_names: List[str], locale: str) -> List[str]:\n \"\"\"Translates a list of critter names to the given locale.\"\"\"\n if locale in ['auto', 'en-us']:\n return critter_names\n\n translation_path = os.path.join('critters', 'translations.json')\n with open(translation_path, encoding='utf-8') as fp:\n translations = json.load(fp)\n return [translations[name][locale] for name in critter_names]\n\n\ndef _read_frames(filename: str) -> Iterator[Tuple[CritterType, numpy.ndarray]]:\n \"\"\"Parses frames of the given video and returns the relevant region.\"\"\"\n frame_skip = 0\n last_section = None\n last_frame = None\n\n 
good_frames: Dict[Tuple[CritterType, int], numpy.ndarray] = {}\n\n cap = cv2.VideoCapture(filename)\n while True:\n ret, frame = cap.read()\n if not ret:\n break # Video is over\n\n if frame_skip > 0:\n frame_skip -= 1\n continue\n\n if frame.shape[:2] == (1080, 1920):\n frame = cv2.resize(frame, (1280, 720))\n\n assert frame.shape[:2] == (720, 1280), \\\n 'Invalid resolution: {1}x{0}'.format(*frame.shape)\n\n if not detect(frame):\n continue # Skip frames that are not showing critterpedia.\n\n # Detect a dark line that shows up only in Pictures Mode.\n mode_detector = frame[20:24, 600:800].mean(axis=(0, 1))\n if numpy.linalg.norm(mode_detector - (199, 234, 237)) > 50:\n raise AssertionError('Critterpedia is in Pictures Mode.')\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if filename.endswith('.jpg'): # Handle screenshots\n yield _detect_critter_section(gray), frame[149:623, :]\n continue\n\n if last_frame is None:\n last_frame = frame\n continue\n\n critter_section = _detect_critter_section(gray)\n if critter_section != last_section:\n if last_section is not None:\n frame_skip = 15\n last_section = critter_section\n continue\n\n # Grab the last frame for each side and section combination.\n if last_frame[570:600, :70, 2].min() > 230:\n good_frames[critter_section, 0] = last_frame\n elif last_frame[570:600, -70:, 2].min() > 230:\n good_frames[critter_section, 1] = last_frame\n\n last_frame = frame\n\n cap.release()\n\n for (critter_type, _), frame in good_frames.items():\n # Crop the region containing critter icons.\n yield critter_type, frame[149:623, :]\n\n\ndef _detect_critter_section(gray_frame: numpy.ndarray) -> CritterType:\n for i, critter_type in enumerate(CritterType):\n start_x, end_x = 65 + i * 65, 65 + (i + 1) * 65\n section_icon = gray_frame[70:75, start_x:end_x]\n if section_icon.min() > 150:\n return critter_type\n raise AssertionError('Invalid Critterpedia page')\n\n\ndef _parse_frame(frame: numpy.ndarray) -> Iterator[numpy.ndarray]:\n \"\"\"Parses an individual frame and extracts icons from the Critterpedia page.\"\"\"\n # Start/end verical position for the 5 grid rows.\n y_positions = [0, 95, 190, 285, 379]\n y_offsets = [5, 89]\n\n rows = []\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n for y_pos, offset in itertools.product(y_positions, y_offsets):\n line = gray[y_pos + offset - 3:y_pos + offset + 3, :]\n if line.min() < 170 or line.max() > 240:\n continue\n rows.append(line)\n\n if not rows:\n return\n\n thresh = cv2.threshold(cv2.vconcat(rows), 210, 255, 0)[1]\n separators = thresh.mean(axis=0) < 240\n x_lines = list(separators.nonzero()[0])\n\n # Normalize column lines by taking the average of all of them.\n # We know they are 112.7px apart, so we find the best offset from given lines.\n centers = [numpy.fmod(x, 112.7) for x in x_lines]\n centroid = round(numpy.median(centers))\n x_positions = numpy.arange(centroid, 1280, 112.7).astype(int)\n\n for x, y in itertools.product(x_positions, y_positions):\n if x + 96 > frame.shape[1]:\n continue # Past the right side of the frame\n yield frame[y+8:y+88, x+16:x+96]\n\n\ndef _remove_blanks(all_icons: List[numpy.ndarray]) -> List[numpy.ndarray]:\n \"\"\"Remove all icons that show empty critter boxes.\"\"\"\n filtered_icons = []\n for icon in all_icons:\n if icon[20:60, 20:60, 2].min() > 100:\n continue\n filtered_icons.append(icon)\n return filtered_icons\n\n\[email protected]_cache()\ndef _get_critter_db() -> Dict[CritterType, List[CritterImage]]:\n \"\"\"Fetches the critters database for a given locale, with 
caching.\"\"\"\n with open(os.path.join('critters', 'names.json')) as fp:\n critter_data = json.load(fp)\n\n critter_db = collections.defaultdict(list)\n for critter_name, icon_name, critter_type_str in critter_data:\n critter_type = CritterType.from_str(critter_type_str)\n critter = CritterImage(critter_name, critter_type, icon_name)\n critter_db[critter_type].append(critter)\n return critter_db\n\n\ndef _find_best_match(icon: numpy.ndarray, critters: List[CritterImage]) -> CritterImage:\n \"\"\"Finds the closest matching critter for the given icon.\"\"\"\n fast_similarity_metric = lambda r: cv2.absdiff(icon, r.img).mean()\n similarities = list(map(fast_similarity_metric, critters))\n sim1, sim2 = numpy.partition(similarities, kth=2)[:2]\n\n # If the match seems obvious, return the quick result.\n if abs(sim1 - sim2) > 3:\n return critters[numpy.argmin(similarities)]\n\n # Otherwise, we use a slower matching, which tries various shifts.\n def slow_similarity_metric(critter):\n diffs = []\n for x in [-2, -1, 0, 1, 2]:\n shifted = numpy.roll(icon, x, axis=1)\n diffs.append(cv2.absdiff(shifted, critter.img).sum())\n return min(diffs) # Return lowest diff across shifts.\n\n similarities = list(map(slow_similarity_metric, critters))\n return critters[numpy.argmin(similarities)]\n\n\nif __name__ == \"__main__\":\n results = scan('examples/extra/critters_badpage.mp4')\n print('\\n'.join(results.items))\n" ]
[ [ "numpy.partition", "numpy.fmod", "numpy.arange", "numpy.median", "numpy.linalg.norm", "numpy.argmin", "numpy.array", "numpy.roll" ] ]
jaymody/transformers
[ "2f64fd85dcc61a9e1156843daa978f7dbe480c1e" ]
[ "src/transformers/training_args.py" ]
[ "import dataclasses\nimport json\nimport logging\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional, Tuple\n\nfrom .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required\n\n\nif is_torch_available():\n import torch\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef default_logdir() -> str:\n \"\"\"\n Same default as PyTorch\n \"\"\"\n import socket\n from datetime import datetime\n\n current_time = datetime.now().strftime(\"%b%d_%H-%M-%S\")\n return os.path.join(\"runs\", current_time + \"_\" + socket.gethostname())\n\n\n@dataclass\nclass TrainingArguments:\n \"\"\"\n TrainingArguments is the subset of the arguments we use in our example scripts\n **which relate to the training loop itself**.\n\n Using :class:`~transformers.HfArgumentParser` we can turn this class\n into argparse arguments to be able to specify them on the command line.\n\n Parameters:\n output_dir (:obj:`str`):\n The output directory where the model predictions and checkpoints will be written.\n overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If :obj:`True`, overwrite the content of the output directory. Use this to continue training if\n :obj:`output_dir` points to a checkpoint directory.\n do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run training or not.\n do_eval (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run evaluation on the dev set or not.\n do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run predictions on the test set or not.\n evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run evaluation during training at each logging step or not.\n per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for training.\n per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for evaluation.\n gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):\n Number of updates steps to accumulate the gradients for, before performing a backward/update pass.\n learning_rate (:obj:`float`, `optional`, defaults to 5e-5):\n The initial learning rate for Adam.\n weight_decay (:obj:`float`, `optional`, defaults to 0):\n The weight decay to apply (if not zero).\n adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):\n Epsilon for the Adam optimizer.\n max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):\n Maximum gradient norm (for gradient clipping).\n num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):\n Total number of training epochs to perform.\n max_steps (:obj:`int`, `optional`, defaults to -1):\n If set to a positive number, the total number of training steps to perform. Overrides\n :obj:`num_train_epochs`.\n warmup_steps (:obj:`int`, `optional`, defaults to 0):\n Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.\n logging_dir (:obj:`str`, `optional`):\n Tensorboard log directory. 
Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.\n logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Wheter to log and evalulate the first :obj:`global_step` or not.\n logging_steps (:obj:`int`, `optional`, defaults to 500):\n Number of update steps between two logs.\n save_steps (:obj:`int`, `optional`, defaults to 500):\n Number of updates steps before two checkpoint saves.\n save_total_limit (:obj:`int`, `optional`):\n If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in\n :obj:`output_dir`.\n no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Wherher to not use CUDA even when it is available or not.\n seed (:obj:`int`, `optional`, defaults to 42):\n Random seed for initialization.\n fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.\n fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):\n For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details\n on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.\n local_rank (:obj:`int`, `optional`, defaults to -1):\n During distributed training, the rank of the process.\n tpu_num_cores (:obj:`int`, `optional`):\n When training on TPU, the mumber of TPU cores (automatically passed by launcher script).\n debug (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When training on TPU, whether to print debug metrics or not.\n dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)\n or not.\n eval_steps (:obj:`int`, `optional`, defaults to 1000):\n Number of update steps between two evaluations.\n past_index (:obj:`int`, `optional`, defaults to -1):\n Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc`XLNet <../model_doc/xlnet>` can\n make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the\n ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model\n at the next training step under the keyword argument ``mems``.\n \"\"\"\n\n output_dir: str = field(\n metadata={\"help\": \"The output directory where the model predictions and checkpoints will be written.\"}\n )\n overwrite_output_dir: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Overwrite the content of the output directory.\"\n \"Use this to continue training if output_dir points to a checkpoint directory.\"\n )\n },\n )\n\n do_train: bool = field(default=False, metadata={\"help\": \"Whether to run training.\"})\n do_eval: bool = field(default=False, metadata={\"help\": \"Whether to run eval on the dev set.\"})\n do_predict: bool = field(default=False, metadata={\"help\": \"Whether to run predictions on the test set.\"})\n evaluate_during_training: bool = field(\n default=False, metadata={\"help\": \"Run evaluation during training at each logging step.\"},\n )\n\n per_device_train_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for training.\"}\n )\n per_device_eval_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for evaluation.\"}\n )\n\n per_gpu_train_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_train_batch_size` is preferred. \"\n \"Batch size per GPU/TPU core/CPU for training.\"\n },\n )\n per_gpu_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_eval_batch_size` is preferred.\"\n \"Batch size per GPU/TPU core/CPU for evaluation.\"\n },\n )\n\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\"help\": \"Number of updates steps to accumulate before performing a backward/update pass.\"},\n )\n\n learning_rate: float = field(default=5e-5, metadata={\"help\": \"The initial learning rate for Adam.\"})\n weight_decay: float = field(default=0.0, metadata={\"help\": \"Weight decay if we apply some.\"})\n adam_epsilon: float = field(default=1e-8, metadata={\"help\": \"Epsilon for Adam optimizer.\"})\n max_grad_norm: float = field(default=1.0, metadata={\"help\": \"Max gradient norm.\"})\n\n num_train_epochs: float = field(default=3.0, metadata={\"help\": \"Total number of training epochs to perform.\"})\n max_steps: int = field(\n default=-1,\n metadata={\"help\": \"If > 0: set total number of training steps to perform. Override num_train_epochs.\"},\n )\n warmup_steps: int = field(default=0, metadata={\"help\": \"Linear warmup over warmup_steps.\"})\n\n logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={\"help\": \"Tensorboard log dir.\"})\n logging_first_step: bool = field(default=False, metadata={\"help\": \"Log and eval the first global_step\"})\n logging_steps: int = field(default=500, metadata={\"help\": \"Log every X updates steps.\"})\n save_steps: int = field(default=500, metadata={\"help\": \"Save checkpoint every X updates steps.\"})\n save_total_limit: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Limit the total amount of checkpoints.\"\n \"Deletes the older checkpoints in the output_dir. 
Default is unlimited checkpoints\"\n )\n },\n )\n no_cuda: bool = field(default=False, metadata={\"help\": \"Do not use CUDA even when it is available\"})\n seed: int = field(default=42, metadata={\"help\": \"random seed for initialization\"})\n\n fp16: bool = field(\n default=False,\n metadata={\"help\": \"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\"},\n )\n fp16_opt_level: str = field(\n default=\"O1\",\n metadata={\n \"help\": (\n \"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\"\n )\n },\n )\n local_rank: int = field(default=-1, metadata={\"help\": \"For distributed training: local_rank\"})\n\n tpu_num_cores: Optional[int] = field(\n default=None, metadata={\"help\": \"TPU: Number of TPU cores (automatically passed by launcher script)\"}\n )\n tpu_metrics_debug: bool = field(\n default=False,\n metadata={\"help\": \"Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics\"},\n )\n debug: bool = field(default=False, metadata={\"help\": \"Whether to print debug metrics on TPU\"})\n\n dataloader_drop_last: bool = field(\n default=False, metadata={\"help\": \"Drop the last incomplete batch if it is not divisible by the batch size.\"}\n )\n eval_steps: int = field(default=1000, metadata={\"help\": \"Run an evaluation every X steps.\"})\n\n past_index: int = field(\n default=-1,\n metadata={\"help\": \"If >=0, uses the corresponding part of the output as the past state for next step.\"},\n )\n\n @property\n def train_batch_size(self) -> int:\n \"\"\"\n The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_train_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_train_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size\n return per_device_batch_size * max(1, self.n_gpu)\n\n @property\n def eval_batch_size(self) -> int:\n \"\"\"\n The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_eval_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_eval_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size\n return per_device_batch_size * max(1, self.n_gpu)\n\n @cached_property\n @torch_required\n def _setup_devices(self) -> Tuple[\"torch.device\", int]:\n logger.info(\"PyTorch: setting up devices\")\n if self.no_cuda:\n device = torch.device(\"cpu\")\n n_gpu = 0\n elif is_torch_tpu_available():\n device = xm.xla_device()\n n_gpu = 0\n elif self.local_rank == -1:\n # if n_gpu is > 1 we'll use nn.DataParallel.\n # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`\n # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will\n # trigger an error that a device index is missing. Index 0 takes into account the\n # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`\n # will use the first GPU in that env, i.e. 
GPU#1\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n # Here, we'll use torch.distributed.\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend=\"nccl\")\n device = torch.device(\"cuda\", self.local_rank)\n n_gpu = 1\n\n if device.type == \"cuda\":\n torch.cuda.set_device(device)\n\n return device, n_gpu\n\n @property\n @torch_required\n def device(self) -> \"torch.device\":\n \"\"\"\n The device used by this process.\n \"\"\"\n return self._setup_devices[0]\n\n @property\n @torch_required\n def n_gpu(self):\n \"\"\"\n The number of GPUs used by this process.\n\n Note:\n This will only be greater than one when you have multiple GPUs available but are not using distributed\n training. For distributed training, it will always be 1.\n \"\"\"\n return self._setup_devices[1]\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n \"\"\"\n return json.dumps(dataclasses.asdict(self), indent=2)\n\n def to_sanitized_dict(self) -> Dict[str, Any]:\n \"\"\"\n Sanitized serialization to use with TensorBoard’s hparams\n \"\"\"\n d = dataclasses.asdict(self)\n d = {**d, **{\"train_batch_size\": self.train_batch_size, \"eval_batch_size\": self.eval_batch_size}}\n\n valid_types = [bool, int, float, str]\n if is_torch_available():\n valid_types.append(torch.Tensor)\n\n return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}\n" ]
[ [ "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count" ] ]
Mxbonn/zigzag_fork
[ "250ee5e22904ba846dfb106983d46b83bd9ee230" ]
[ "cost_model_funcs.py" ]
[ "import numpy as np\nimport copy\nimport sys\nimport math\nfrom numpy import prod\n\n\"\"\"\n\nThis file includes all the functions used in the cost model.\n\n\"\"\"\n\n\ndef get_operand_level_energy_cost(operand, level, mem_word_cost, mac_array_info, schedule_info, loop, mem_fifo,\n mem_scheme, precision, utilization, sum_shared_bw):\n # mac_cost = get_mac_cost(layer, mac_array_info['single_mac_energy'])\n #\n # if level < len(schedule_info['temporal'][operand]) - 1:\n # wire_cost = get_operand_level_wire_cost(operand, level, schedule_info, mac_array_info, loop, mem_fifo)\n # else:\n # wire_cost = 0\n wire_cost = 0\n\n mem_cost_dy = get_operand_level_dynamic_mem_cost(operand, level, loop, mem_word_cost, mem_scheme, precision,\n utilization, sum_shared_bw)\n\n mem_cost_st = get_static_mem_cost()\n\n return [wire_cost, mem_cost_dy, mem_cost_st]\n\n\ndef get_active_mac_cost(layer, single_mac_energy):\n return layer.total_MAC_op * single_mac_energy\n\n\ndef get_idle_mac_cost(layer, layer_rounded, array_size, idle_mac_energy, spatial_unrolling):\n idle_mac_cost = []\n for su in spatial_unrolling:\n active_mac_count = 1\n for level_list in su['W']:\n if level_list:\n for su_unit in level_list:\n active_mac_count *= su_unit[1]\n total_mapping_count = math.ceil(layer_rounded.total_MAC_op/active_mac_count)\n ideal_mac_count = total_mapping_count * array_size[0] * array_size[1]\n idle_mac_count = ideal_mac_count - layer.total_MAC_op\n idle_mac_cost.append(idle_mac_count * idle_mac_energy)\n return idle_mac_cost\n\n\ndef get_operand_level_inter_pe_distance(op, operand_partitions, input_temporal_loops, is_fifo):\n \"\"\"\n The function computes the worst inter-PE distance to be covered by each variable accessed at level above of array for\n each operand across the listed dimensions.\n If two dimensions, first dimension is assumed to be the unrolling/flooring of operand across columns\n second dimension is assumed to be the unrolling/flooring of operand across rows\n\n Across each dimension, if flooring is present, each flooring block has to be reached via an offset distance, where\n offset is a list that contains the offsets for each flooring block.\n Inside each flooring block, given the unrolling scheme, an inter-block distance has to be covered.\n\n The distance for each array dimension is computed as the sum(distance) * len(offset) + sum(offset)\n\n :param op: Operand type, char variable in ['I','W','O']\n :param operand_partitions: Unrolling and flooring scheme for all partitions at a given level, expressed as\n operand_partitions[operand][dimension][flooring block type][flooring block size]\n :return: distance covered by the given operand type with the given operand partition scheme\n \"\"\"\n\n '''\n operand_irrelevant_types contains the irrelevant loop for each operand as enum integers (ref. 
layer.py)\n operand_irrelevant_types[operand][irrelevant types]\n '''\n output_irrelevant = [1, 2, 5, 7]\n input_irrelevant = [6]\n weight_irrelevant = [3, 4, 7]\n operand_irrelevant_types = {'W': weight_irrelevant, 'I': input_irrelevant, 'O': output_irrelevant}\n opit_distance = [0]\n # Count array cost for each operand type\n operand_partitions = operand_partitions[op]\n opit = 0\n total_distance_opit = 0\n length = []\n lengthx = [0]\n distance = []\n count = []\n dim_distance = []\n\n '''\n If the operand_partitions at the given level list is empty, the distance covered in the array will be zero since\n there's no unrolling, so returns [0] \n '''\n if not operand_partitions:\n return [0]\n else:\n for dim in range(len(operand_partitions)):\n '''\n INTER-BLOCK DISTANCE\n Inside each flooring block, the unrolling scheme is considered. The inter-block distance is the sum of \n jumps between elements of the array times the length of each jump. \n TODO rest of commenting :P\n '''\n distance = []\n for i in range(0, len(operand_partitions[dim])):\n if not operand_partitions[dim][i]:\n continue\n if operand_partitions[dim][i][0] in operand_irrelevant_types[op]:\n # COUNT OF JUMPS\n n = operand_partitions[dim][i][1]\n nx = 1\n for j in range(len(operand_partitions[dim]) - 2, i, -1):\n if operand_partitions[dim][j][0] in operand_irrelevant_types[op]:\n nx = nx * operand_partitions[dim][j][1]\n n = n * nx\n count.append(n)\n # LENGTH OF EACH JUMP\n size_partitions_below = 1\n for k in range(0, i):\n size_partitions_below = size_partitions_below * operand_partitions[dim][k][1]\n length.append(size_partitions_below - lengthx[-1])\n lengthx.append(size_partitions_below)\n distance.append(length[-1] * count[-1])\n\n '''\n OFFSET\n Given the flooring/unrolling scheme, across the considered dimension if irrelevant partition loops are present\n \"islands\" (the flooring blocks) of repeated values can be localized. While the distance inside the flooring \n blocks is computed as the inter-block distance, each block is also characterized by an offset wrt source.\n\n The offset for each block is computed as base_step * j + baseline. 
\n Baseline is the distance to be covered that contains all previous flooring blocks\n Base step is the product of the sizes of the innermost relevant partitions in the flooring block\n '''\n offset = []\n base_step = 1\n if operand_partitions[dim]:\n first_relevant_partition = len(operand_partitions[dim])\n for i in range(len(operand_partitions[dim])):\n if operand_partitions[dim][i][0] not in operand_irrelevant_types[op]:\n first_relevant_partition = i\n for j in range(0, i):\n base_step = base_step * operand_partitions[dim][j][1]\n baseline = 0\n for j in range(0, operand_partitions[dim][first_relevant_partition][1]):\n offset.append(base_step * j + baseline)\n break\n\n for i in range(first_relevant_partition + 1, len(operand_partitions[dim])):\n if operand_partitions[dim][i][0] not in operand_irrelevant_types[op]:\n for k in range(1, operand_partitions[dim][i][1]):\n baseline = 1\n if i == 0:\n baseline = 0\n for j in range(0, i):\n baseline = baseline * operand_partitions[dim][j][1] * k\n for j in range(0, operand_partitions[dim][first_relevant_partition][1]):\n offset.append(base_step * j + baseline)\n if not operand_partitions[dim] or not offset:\n offset = [0]\n\n rtl = 1\n fifo_distance = 0\n if op == 'I':\n if is_fifo == True:\n unroll_size = 1\n unroll_loop_type = 0\n for j in range(len(operand_partitions[dim])):\n if operand_partitions[dim][j][0] in [1, 2, 3, 4]:\n unroll_loop_type = operand_partitions[dim][j][0]\n for m in range(0, j + 1):\n unroll_size *= operand_partitions[dim][m][1]\n break\n first_relevant_temporal_loop_size = 1\n tmp_tl = [i for i in input_temporal_loops if i[0] != 6]\n\n if tmp_tl:\n if unroll_loop_type == 1:\n if tmp_tl[0][0] == 3:\n rtl = tmp_tl[0][1]\n if unroll_loop_type == 3:\n try:\n if tmp_tl[0][0] == 1:\n rtl = tmp_tl[0][1]\n except:\n a = 1\n if unroll_loop_type == 2:\n if tmp_tl[0][0] == 4:\n rtl = tmp_tl[0][1]\n if unroll_loop_type == 4:\n if tmp_tl[0][0] == 2:\n rtl = tmp_tl[0][1]\n # TODO this formula has to be corrected. 
1 should be the number of irrelevant jumps and 0 the sum of the lenghts of the irrelevant jumps\n # Since we assume that there are no replications (FOR NOW) it will be corrected later\n\n fifo_distance = (rtl - 1) * (unroll_size - 1) * 1 + 0\n\n div_factor = 1\n # if op != 'I':\n # for j in range(len(operand_partitions[dim])):\n # if operand_partitions[dim][j][0] not in operand_irrelevant_types[op]:\n # div_factor *= operand_partitions[dim][j][1]\n # else:\n # for j in range(len(operand_partitions[dim])):\n # if operand_partitions[dim][j][0] not in operand_irrelevant_types[op]:\n # div_factor *= operand_partitions[dim][j][1]\n # div_factor = div_factor + rtl - 1\n\n dim_distance.append((sum(distance) * len(offset) + sum(offset) + fifo_distance) / div_factor)\n\n '''\n In the case of two dimensions, the distance is computed as in a 2D mesh network:\n The distance across rows is taken only once\n The distance across columns is multiplied by the number of rows\n '''\n if len(operand_partitions) == 2:\n num_rows = 1\n for i in range(len(operand_partitions[1])):\n num_rows = num_rows * operand_partitions[1][i][1]\n row_distance = dim_distance[0] * num_rows\n col_distance = dim_distance[1]\n total_distance_opit = row_distance + col_distance\n if len(operand_partitions) == 1:\n total_distance_opit = dim_distance[0]\n\n opit_distance[opit] = total_distance_opit\n\n return opit_distance\n\n\ndef get_operand_level_wire_cost(op, level, schedule_info, mac_array_info, loop, mem_fifo):\n return 0\n\n # \"\"\"\n # Wire cost is calculated as inter-PE cost + memory interconnection cost\n # \"\"\"\n # # Inter-PE cost\n # \"\"\"\n # Get above-array-level memory (just one level above the array) access count for W/I/O (total access for each),\n # and times them with corresponding inter-PE movement step (based on spatial unrolling type and size)\n # and unit_wire_energy (energy for 1-bit data movement between neighbour PE)\n # \"\"\"\n # \"\"\"\n # Inter-PE distance covered\n # \"\"\"\n #\n # operand_types = ['W', 'I', 'O']\n #\n # partitions = {\n # 'W': [],\n # 'I': [],\n # 'O': []}\n #\n # '''\n # Given that the spatial unrolling and the flooring loops are stored in two different variables (ref\n # cost_model_input.schedule_info), in order to compute the array cost is easier to change the representation in a\n # unique variable that stores the information for both spatial unrolling and flooring.\n # Operand partitions are represented as:\n # operand_paritions[operand][level][dimension][flooring block type][flooring block size]\n # Where operand in ['I','W','O']\n # '''\n # for operand in operand_types:\n # for lev in range(0, len(schedule_info['spatial'][operand])):\n # partitions[operand].append([])\n # for floor_dim in range(0, len(schedule_info['flooring'][operand][lev])):\n # partitions[operand][lev].append([])\n # for flooring_type in range(0, len(schedule_info['flooring'][operand][lev][floor_dim])):\n # w, a = zip(*schedule_info['spatial'][operand][lev])\n # try:\n # partitions[operand][lev][floor_dim].append(\n # list(schedule_info['spatial'][operand][lev][\n # w.index(schedule_info['flooring'][operand][lev][floor_dim][flooring_type])]))\n # except:\n # return 0\n # for operand in operand_types:\n # partitions[operand] += [[]] * (level)\n # for lev in range(0, len(partitions[operand])):\n # if not partitions[operand][lev]:\n # partitions[operand][lev].append([])\n # try:\n # operand_partitions = {'I': partitions['I'][level], 'O': partitions['O'][level], 'W': partitions['W'][level]}\n # except 
IndexError:\n # print({'I': partitions['I'], 'O': partitions['O'], 'W': partitions['W']})\n # print(level, op)\n # # continue\n # # sys.exit()\n # a = 1\n # '''\n # Given the adopted representation for the unrolling and flooring schemes, the variable is passed to the function that\n # computes the distance that each single variable has to cover in order to reach its position in the array\n # '''\n # try:\n # operand_distance = get_operand_level_inter_pe_distance(op, operand_partitions,\n # schedule_info['temporal'][op][level], loop.I_fifo[level])\n # except IndexError:\n # operand_distance = get_operand_level_inter_pe_distance(op, operand_partitions,\n # schedule_info['temporal'][op][level], False)\n # operand_distance = np.array(operand_distance)\n # operand_irrelevant = {'W': [3, 4, 7], 'I': [6], 'O': [1, 2, 5]}\n # '''\n # INPUT COST : computed as (distance covered) x (read accesses from level above) x (unit bit wire cost) x (bits\n # of precision)\n # '''\n # div_factor = 1\n # for partition in operand_partitions[op]:\n # for unrolling in partition:\n # if unrolling[0] not in operand_irrelevant[op]:\n # div_factor *= unrolling[1]\n #\n # if op == 'I':\n # if mem_fifo[op][level]:\n # array_cost = operand_distance[0] * loop.mem_access_elem['I'][level][0] * mac_array_info[\n # 'unit_wire_energy'][level] * \\\n # mac_array_info['precision'] / div_factor\n # else:\n # array_cost = operand_distance[0] * loop.mem_access_elem['I'][level][0] * mac_array_info[\n # 'unit_wire_energy'][level] * \\\n # mac_array_info['precision'] / div_factor\n #\n # '''\n # WEIGHT COST : computed as (distance covered) x (read accesses from level above) x (unit bit wire cost) x (bits\n # of precision)\n # '''\n # if op == 'W':\n # array_cost = operand_distance[0] * loop.mem_access_elem['W'][level][0] * mac_array_info['unit_wire_energy'][\n # level] * \\\n # mac_array_info['precision'] / div_factor\n #\n # '''\n # OUTPUT COST :\n # if PARTIAL OUTPUT: (distance covered) x (read+write accesses from level above) x (unit bit wire cost) x (bits of\n # precision+headroom bits)\n # if FINAL OUTPUT: (distance covered) x (write accesses to level above) x (unit bit wire cost) x (bits of\n # precision)\n # '''\n # if op == 'O':\n # if loop.mem_access_elem['O_final'][level][0][1] == 0:\n # array_cost = operand_distance[0] * sum(loop.mem_access_elem['O_partial'][level][0]) * \\\n # mac_array_info['unit_wire_energy'][level] * \\\n # sum([mac_array_info['precision'] * 2, mac_array_info['headroom']]) / div_factor\n # else:\n # array_cost = operand_distance[0] * loop.mem_access_elem['O_final'][level][0][1] * \\\n # mac_array_info['unit_wire_energy'][level] * \\\n # mac_array_info['precision'] / div_factor\n #\n # return array_cost\n #\n # # TODO Memory interconnection cost\n\n\ndef get_operand_level_wire_distance(op, level, schedule_info, mac_array_info, loop, mem_fifo):\n return [0, 0]\n # \"\"\"\n # Wire cost is calculated as inter-PE cost + memory interconnection cost\n # \"\"\"\n # # Inter-PE cost\n # \"\"\"\n # Get above-array-level memory (just one level above the array) access count for W/I/O (total access for each),\n # and times them with corresponding inter-PE movement step (based on spatial unrolling type and size)\n # and unit_wire_energy (energy for 1-bit data movement between neighbour PE)\n # \"\"\"\n # \"\"\"\n # Inter-PE distance covered\n # \"\"\"\n #\n # operand_types = ['W', 'I', 'O']\n #\n # partitions = {\n # 'W': [],\n # 'I': [],\n # 'O': []}\n #\n # '''\n # Given that the spatial unrolling and the flooring 
loops are stored in two different variables (ref\n # cost_model_input.schedule_info), in order to compute the array cost is easier to change the representation in a\n # unique variable that stores the information for both spatial unrolling and flooring.\n # Operand partitions are represented as:\n # operand_paritions[operand][level][dimension][flooring block type][flooring block size]\n # Where operand in ['I','W','O']\n # '''\n # # for operand in operand_types:\n # # for lev in range(0, len(schedule_info['spatial'][operand])):\n # # partitions[operand].append([])\n # # for floor_dim in range(0, len(schedule_info['flooring'][operand][lev])):\n # # partitions[operand][lev].append([])\n # # for flooring_type in range(0, len(schedule_info['flooring'][operand][lev][floor_dim])):\n # # w, a = zip(*schedule_info['spatial'][operand][lev])\n # # try:\n # # partitions[operand][lev][floor_dim].append(\n # # list(schedule_info['spatial'][operand][lev][\n # # w.index(schedule_info['flooring'][operand][lev][floor_dim][flooring_type])]))\n # # except:\n # # a=1\n # for operand in operand_types:\n # for lev in range(0, len(schedule_info['spatial'][operand])):\n # partitions[operand].append([])\n # for floor_dim in range(0, len(schedule_info['flooring'][operand][lev])):\n # partitions[operand][lev].append([])\n # for flooring_type in range(0, len(schedule_info['flooring'][operand][lev][floor_dim])):\n # w, a = zip(*schedule_info['spatial'][operand][lev])\n # try:\n # partitions[operand][lev][floor_dim].append(\n # list(schedule_info['spatial'][operand][lev][\n # w.index(schedule_info['spatial'][operand][lev][flooring_type][0])]))\n # except:\n # return 0\n #\n # for operand in operand_types:\n # partitions[operand] += [[]] * (level)\n # for lev in range(0, len(partitions[operand])):\n # if not partitions[operand][lev]:\n # partitions[operand][lev].append([])\n # try:\n # operand_partitions = {'I': partitions['I'][level], 'O': partitions['O'][level], 'W': partitions['W'][level]}\n # except IndexError:\n # print({'I': partitions['I'], 'O': partitions['O'], 'W': partitions['W']})\n # print(level, op)\n # # continue\n # # sys.exit()\n # a = 1\n # operand_partitions = {'I': partitions['I'][level], 'O': partitions['O'][level], 'W': partitions['W'][level]}\n #\n # '''\n # Given the adopted representation for the unrolling and flooring schemes, the variable is passed to the function that\n # computes the distance that each single variable has to cover in order to reach its position in the array\n # '''\n # try:\n # operand_distance = get_operand_level_inter_pe_distance(op, operand_partitions,\n # schedule_info['temporal'][op][level],\n # loop.I_fifo[level])\n # except IndexError:\n # operand_distance = get_operand_level_inter_pe_distance(op, operand_partitions,\n # schedule_info['temporal'][op][level], False)\n # operand_distance = np.array(operand_distance)\n #\n # operand_irrelevant = {'W': [3, 4, 7], 'I': [6], 'O': [1, 2, 5]}\n # div_factor = 1\n # for partition in operand_partitions[op]:\n # for unrolling in partition:\n # if unrolling[0] not in operand_irrelevant[op]:\n # div_factor *= unrolling[1]\n #\n # if op == 'I':\n # if mem_fifo[op][level]:\n # array_distance = operand_distance[0] * loop.mem_access_elem['I'][level][0] * mac_array_info[\n # 'precision'] / div_factor\n # else:\n # array_distance = operand_distance[0] * loop.mem_access_elem['I'][level][0] * mac_array_info[\n # 'precision'] / div_factor\n #\n # if op == 'W':\n # array_distance = operand_distance[0] * loop.mem_access_elem['W'][level][0] * 
mac_array_info[\n # 'precision'] / div_factor\n #\n # if op == 'O':\n #\n # if loop.mem_access_elem['O_final'][level][0][1] == 0:\n #\n # array_distance = operand_distance[0] * sum(loop.mem_access_elem['O_partial'][level][0]) * sum(\n # [mac_array_info['precision'] * 2, mac_array_info['headroom']]) / div_factor\n # else:\n # array_distance = operand_distance[0] * loop.mem_access_elem['O_final'][level][0][1] * mac_array_info[\n # 'precision'] / div_factor\n #\n # return [array_distance, list(operand_distance)]\n\n\ndef iterative_data_format_clean(original_dict):\n new_dict = {'W':[], 'I':[], 'O':[]}\n for operand in ['W', 'I', 'O']:\n for li in original_dict[operand]:\n new_dict[operand].append(li[0])\n return new_dict\n\n\ndef get_operand_level_dynamic_mem_cost(operand, level, loop, mem_word_cost, mem_scheme, precision, utilization,\n sum_shared_bw):\n \"\"\"\n The function computes the dynamic energy consumed for accessing a memory at a certain level for a given operand\n :param operand : Should be one of ['I', 'O', 'W']\n :param level : Integer number, level with respect to temporal blocking distribution\n :param loop : loop object, contains number of memory accesses. For ref, view loop class\n :param mem_word_cost : mem word energy cost\n\n The dynamic energy consumption is computed as read cost + write cost at a given memory level for a defined operand\n\n For 'I', 'W':\n Read (write) cost are computed as the product of number of read (write) memory accesses per level times the\n cost per word access\n For 'O':\n Given that outputs are divided in two categories (O_partial and O_final) with different access costs and\n Given that at each level there's different numbers of writes and reads to level below and above\n The read (write) cost is computed as the sum of read (write) accesses to level below + read (write) accesses to\n level above for O_partial and for O_final times the relative access costs\n \"\"\"\n\n \"\"\"\n FOR COMPUTING SINGLE COST : (PRECISION / BW) * ACCESS COST\n \"\"\"\n\n if type(mem_scheme.mem_bw['W'][0][0]) in [list, tuple]:\n mem_scheme.mem_bw = iterative_data_format_clean(mem_scheme.mem_bw)\n\n if type(mem_word_cost['W'][0][0]) in [list, tuple]:\n mem_word_cost = iterative_data_format_clean(mem_word_cost)\n\n if not sum_shared_bw:\n # READ COST\n if utilization.req_mem_bw_bit[operand][level][0] <= mem_scheme.mem_bw[operand][level][0]:\n if operand == 'O':\n read_cost = (((loop.mem_access_elem['O_final'][level][0][0] + loop.mem_access_elem['O_final'][level][1][0]) * precision['O_final'] / \\\n mem_scheme.mem_bw[operand][level][0]) * utilization.pun_factor[operand][level] *\n mem_word_cost['O'][level][0]) + (((loop.mem_access_elem['O_partial'][level][0][0] +\n loop.mem_access_elem['O_partial'][level][1][0]) *\n precision['O'] / mem_scheme.mem_bw[operand][level][0])\n * utilization.pun_factor[operand][level] *\n mem_word_cost['O'][level][0])\n else:\n read_cost = (loop.mem_access_elem[operand][level][0] * precision[operand] /\n mem_scheme.mem_bw[operand][level][0]) * utilization.pun_factor[operand][level] * \\\n mem_word_cost[operand][level][0]\n\n else:\n if operand == 'O':\n read_cost = (((loop.mem_access_elem['O_final'][level][0][0] + loop.mem_access_elem['O_final'][level][1][0]) *\n (precision['O_final'] / mem_scheme.mem_bw[operand][level][0])) * mem_word_cost['O'][level][0]) + \\\n (((loop.mem_access_elem['O_partial'][level][0][0] +\n loop.mem_access_elem['O_partial'][level][1][0]) *\n (precision['O'] / mem_scheme.mem_bw[operand][level][0])) * 
mem_word_cost['O'][level][0])\n else:\n read_cost = (loop.mem_access_elem[operand][level][0] * precision[operand] /\n mem_scheme.mem_bw[operand][level][0]) * mem_word_cost[operand][level][0]\n\n # WRITE COST\n if utilization.req_mem_bw_bit[operand][level][1] <= mem_scheme.mem_bw[operand][level][1]:\n if operand == 'O':\n write_cost = (((loop.mem_access_elem['O_final'][level][0][1] +\n loop.mem_access_elem['O_final'][level][1][1]) * precision['O_final'] /\n mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][level][1]) + (\n ((loop.mem_access_elem['O_partial'][level][0][1] +\n loop.mem_access_elem['O_partial'][level][1][1]) * precision['O'] /\n mem_scheme.mem_bw[operand][level][1])\n * mem_word_cost['O'][level][1])\n else:\n write_cost = (loop.mem_access_elem[operand][level][1] * precision[operand] /\n mem_scheme.mem_bw[operand][level][1]) * mem_word_cost[operand][level][1]\n else:\n if operand == 'O':\n write_cost = ((loop.mem_access_elem['O_final'][level][0][1] + loop.mem_access_elem['O_final'][level][1][1]) *\n (precision['O_final'] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][level][1]) + (\n (loop.mem_access_elem['O_partial'][level][0][1] +\n loop.mem_access_elem['O_partial'][level][1][1]) *\n (precision['O'] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][level][1])\n else:\n write_cost = loop.mem_access_elem[operand][level][1] * (\n precision[operand] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost[operand][level][1]\n\n else:\n if utilization.req_sh_mem_bw_bit[operand][level][0] <= mem_scheme.mem_bw[operand][level][0]:\n if operand == 'O':\n read_cost = (((loop.mem_access_elem['O_final'][level][0][0] + loop.mem_access_elem['O_final'][level][1][0]) * precision['O_final'] / \\\n mem_scheme.mem_bw[operand][level][0]) * utilization.pun_factor[operand][level] *\n mem_word_cost['O'][level][0]) + (\n ((loop.mem_access_elem['O_partial'][level][0][0] +\n loop.mem_access_elem['O_partial'][level][1][0]) * precision['O'] /\n mem_scheme.mem_bw[operand][level][0]) * utilization.pun_factor[operand][level]\n * mem_word_cost['O'][level][0])\n else:\n read_cost = (loop.mem_access_elem[operand][level][0] * precision[operand] /\n mem_scheme.mem_bw[operand][level][0]) * utilization.pun_factor[operand][level] * \\\n mem_word_cost[operand][level][0]\n else:\n if operand == 'O':\n read_cost = (((loop.mem_access_elem['O_final'][level][0][0] + loop.mem_access_elem['O_final'][level][1][0]) * \\\n (precision['O_final'] / mem_scheme.mem_bw[operand][level][0])) * mem_word_cost['O'][level][0]) + (((loop.mem_access_elem['O_partial'][level][0][0] +\n loop.mem_access_elem['O_partial'][level][1][0]) * (\n precision['O'] / mem_scheme.mem_bw[operand][level][0]))\n * mem_word_cost['O'][level][0])\n else:\n read_cost = (loop.mem_access_elem[operand][level][0] * (\n precision[operand] / mem_scheme.mem_bw[operand][level][0])) * mem_word_cost[operand][level][0]\n\n # WRITE COST\n if utilization.req_mem_bw_bit[operand][level][1] <= mem_scheme.mem_bw[operand][level][1]:\n if operand == 'O':\n write_cost = (((loop.mem_access_elem['O_final'][level][0][1] +\n loop.mem_access_elem['O_final'][level][1][1]) * precision['O_final'] / \\\n mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][level][1]) + (\n ((loop.mem_access_elem['O_partial'][level][0][1] +\n loop.mem_access_elem['O_partial'][level][1][1]) * precision['O'] /\n mem_scheme.mem_bw[operand][level][1])\n * mem_word_cost['O'][level][1])\n else:\n write_cost = (loop.mem_access_elem[operand][level][1] * precision[operand] /\n 
mem_scheme.mem_bw[operand][level][1]) * \\\n mem_word_cost[operand][level][1]\n else:\n if operand == 'O':\n write_cost = ((loop.mem_access_elem['O_final'][level][0][1] + loop.mem_access_elem['O_final'][level][1][1]) *\n (precision['O_final'] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][1][\n level]) + (\n (loop.mem_access_elem['O_partial'][level][0][1] +\n loop.mem_access_elem['O_partial'][level][1][1]) *\n (precision['O'] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost['O'][1][\n level])\n else:\n write_cost = loop.mem_access_elem[operand][level][1] * (\n precision[operand] / mem_scheme.mem_bw[operand][level][1]) * mem_word_cost[operand][level][1]\n\n return read_cost + write_cost\n\n\n# TODO need to know memory operating frequency and leakage power. Ignore static memory cost for now.\ndef get_static_mem_cost():\n return 0\n\n\ndef su_correction(mem_scheme):\n su_len = {'W': len(mem_scheme.spatial_unrolling[0]['W']),\n 'I': len(mem_scheme.spatial_unrolling[0]['I']),\n 'O': len(mem_scheme.spatial_unrolling[0]['O'])}\n mem_len = {'W': len(mem_scheme.mem_type['W']),\n 'I': len(mem_scheme.mem_type['I']),\n 'O': len(mem_scheme.mem_type['O'])}\n\n for operand in ['W','I','O']:\n if su_len[operand] > mem_len[operand]+1:\n mem_scheme.spatial_unrolling[0][operand] = mem_scheme.spatial_unrolling[0][operand][:mem_len[operand]+1]\n mem_scheme.flooring[0][operand] = mem_scheme.flooring[0][operand][:mem_len[operand]+1]\n elif su_len[operand] < mem_len[operand]+1:\n append_su = [[]]*(mem_len[operand] + 1 -su_len[operand])\n mem_scheme.spatial_unrolling[0][operand].extend(append_su)\n mem_scheme.flooring[0][operand].extend(append_su)\n return mem_scheme\n\n\ndef get_mem_complete_unrolling_count(spatial_unrolling, flooring, array_size):\n \"\"\"\n This function compute the complete memory unrolling count (active ones + inactive ones) for later area estimation.\n \"\"\"\n XY_dimension_unrolling = [[], []]\n XY_dimension_unit_count = [\n {'W': [1] * len(flooring['W']), 'I': [1] * len(flooring['I']), 'O': [1] * len(flooring['O'])},\n {'W': [1] * len(flooring['W']), 'I': [1] * len(flooring['I']), 'O': [1] * len(flooring['O'])}]\n XY_dimension_mem_count_active = [\n {'W': [1] * (len(flooring['W'])-1), 'I': [1] * (len(flooring['I'])-1), 'O': [1] * (len(flooring['O'])-1)},\n {'W': [1] * (len(flooring['W'])-1), 'I': [1] * (len(flooring['I'])-1), 'O': [1] * (len(flooring['O'])-1)}]\n XY_dimension_mem_count_total = [\n {'W': [1] * (len(flooring['W'])-1), 'I': [1] * (len(flooring['I'])-1), 'O': [1] * (len(flooring['O'])-1)},\n {'W': [1] * (len(flooring['W'])-1), 'I': [1] * (len(flooring['I'])-1), 'O': [1] * (len(flooring['O'])-1)}]\n mem_count_active = {'W': [1] * (len(flooring['W']) - 1), 'I': [1] * (len(flooring['I']) - 1),\n 'O': [1] * (len(flooring['O']) - 1)}\n mem_count_total = {'W': [1] * (len(flooring['W'])-1), 'I': [1] * (len(flooring['I'])-1),\n 'O': [1] * (len(flooring['O'])-1)}\n XY_dimension_area_utilize = [{'W': 0, 'I': 0, 'O': 0},\n {'W': 0, 'I': 0, 'O': 0}]\n op_ir_loops = {'W': [3, 4, 7], 'I': [6], 'O': [1, 2, 5]}\n for floor_level in flooring['W']:\n if floor_level:\n for XY, floor_XY in enumerate(floor_level):\n if floor_XY:\n XY_dimension_unrolling[XY].extend(floor_XY)\n for op in ['W', 'I', 'O']:\n for level, floor_level in enumerate(flooring[op]):\n if floor_level:\n i = 0\n for XY, floor_XY in enumerate(floor_level):\n if floor_XY:\n for floor_single in floor_XY:\n if spatial_unrolling[op][level][i][0] != floor_single:\n raise ValueError(\"spatial_unrolling's and 
flooring's order do not match.\")\n XY_dimension_unit_count[XY][op][level] *= spatial_unrolling[op][level][i][1]\n i += 1\n for XY in range(len(XY_dimension_unit_count)):\n for op in ['W', 'I', 'O']:\n XY_dimension_area_utilize[XY][op] = prod(XY_dimension_unit_count[XY][op]) / array_size[XY]\n for level in range(1, len(XY_dimension_unit_count[XY][op])):\n XY_dimension_mem_count_active[XY][op][level-1] = prod(XY_dimension_unit_count[XY][op][level:])\n for op in ['W', 'I', 'O']:\n for level in range(1, len(spatial_unrolling[op])):\n if spatial_unrolling[op][level]:\n for XY in [0, 1]:\n if all(loop_type in op_ir_loops[op] for loop_type in XY_dimension_unrolling[XY]):\n XY_dimension_mem_count_total[XY][op][level-1] = XY_dimension_mem_count_active[XY][op][level-1]\n else:\n XY_dimension_mem_count_total[XY][op][level - 1] = \\\n int(round(XY_dimension_mem_count_active[XY][op][level-1]/XY_dimension_area_utilize[XY][op]))\n for op in ['W', 'I', 'O']:\n for level in range(len(XY_dimension_mem_count_active[0][op])):\n mem_count_active[op][level] = XY_dimension_mem_count_active[0][op][level] * \\\n XY_dimension_mem_count_active[1][op][level]\n mem_count_total[op][level] = XY_dimension_mem_count_total[0][op][level] * \\\n XY_dimension_mem_count_total[1][op][level]\n return mem_count_active, mem_count_total\n" ]
[ [ "numpy.prod" ] ]
lonelycorn/machine-learning
[ "812b4d4f214dc28463cb87bada4e88d0d0cf4184" ]
[ "plsr/Plsr.py" ]
[ "import numpy as np\nimport copy\n\n\ndef decompose(X_raw, Y_raw, M_star):\n \"\"\"\n :param [in] X_raw: M-by-N matrix where each COL is a sample input\n :param [in] Y_raw: L-by-N matrix where each COL is a sample output\n :return (v, p, q, C_YY_history, C_XX_history)\n \"\"\"\n assert(X_raw.shape[1] == Y_raw.shape[1])\n\n # make a copy so we don't introduce surprises\n X = copy.deepcopy(X_raw)\n Y = copy.deepcopy(Y_raw)\n\n # record the dimensions\n (M, N) = X.shape\n (L, _) = Y.shape\n\n # L-by-M\n C_YX = np.matmul(Y, X.T) / (N - 1)\n # M-by-M\n C_XX = np.matmul(X, X.T) / (N - 1)\n # L-by-L\n C_YY = np.matmul(Y, Y.T) / (N - 1)\n\n # each col is a new basis of the input\n v = np.zeros((M, M_star))\n # loading for output\n q = np.zeros((L, M_star))\n # loading for input\n p = np.zeros((M, M_star))\n\n # residual information after using first k latent variables\n C_YY_history = np.zeros(M_star + 1)\n C_XX_history = np.zeros(M_star + 1)\n C_YY_history[0] = 1.0\n C_XX_history[0] = 1.0\n C_YY_init_trace = np.trace(C_YY)\n C_XX_init_trace = np.trace(C_XX)\n #print(f\"initial tr(C_YY) = {C_YY_init_trace}\")\n #print(f\"initial tr(C_XX) = {C_XX_init_trace}\")\n\n # deflated samples\n # NOTE: we're not making copies because PLSR only needs C_XX and C_YX\n X1 = X\n Y1 = Y\n for i in range(M_star):\n (W, D, VT) = np.linalg.svd(C_YX)\n\n # save the most dominant direction\n # M-by-1\n v0 = VT[0, :]\n v[:, i] = v0\n v0 = v0.reshape(-1, 1)\n\n # 1-by-N, input score\n z0 = np.matmul(v0.T, X1).reshape(1, -1)\n\n # 1-by-1, common denominator\n D = np.matmul(v0.T, np.matmul(C_XX, v0))\n\n # L-by-1, output loading\n # this is the direction that leads to best prediction of Y using z\n q0 = np.matmul(C_YX, v0) / D\n q[:, i] = q0.flatten()\n\n # M-by-1 input loading\n # this is the direction that leads to most deflation in X\n p0 = np.matmul(C_XX, v0) / D\n p[:, i] = p0.flatten()\n\n # deflation.\n # let y' = y - q * z, x' = x - p * z\n # C_Y'X'\n # = E{y' * x'.T}\n # = (y - q*z) * (x - p*z).T\n # = y*x.T - q*z*x.T - y*z.T*p.T + q*z*z.T*p.T\n #\n # Plugging in z = v.T * x\n # = y*x.T - q*v.T*x*x.T - y*x.T*v*p.T + q*v.T*x*x.T*v*p.T\n # = C_YX - q*v.T*C_XX - C_YX*v*p.T + q*v.T*C_XX*v*P.T\n #\n # Noting that q * v.T * C_XX * v = C_YX * v\n # = C_YX - q*v.T*C_XX - q*v.T*C_XX*v*p.T + q*v.T*C_XX*v*p.T\n # = C_YX - q*v.T*C_XX\n # NOTE: this is different from the lecture notes\n #print(f\"===== iteration {i} =====\")\n #print(f\"C_YX =\\n{C_YX}\")\n #print(f\"C_XX =\\n{C_XX}\")\n #print(f\"p0 =\\n{p0}\")\n #print(f\"q0 =\\n{q0}\")\n #print(f\"v0 =\\n{v0}\")\n\n X1 = X1 - np.matmul(p0, z0)\n Y1 = Y1 - np.matmul(q0, z0)\n \"\"\"\n C_YX = C_YX - np.matmul(np.matmul(q0, v0.T), C_XX)\n C_XX = C_XX - np.matmul(np.matmul(p0, v0.T), C_XX)\n print(\"from iteration, C_YX =\\n\", C_YX)\n print(\"from deflation, C_YX =\\n\", np.matmul(Y1, X1.T) / (N - 1))\n \"\"\"\n # FIXME: maybe it's better to just deflate samples and re-compute covariance\n # at each iteration\n C_YX = np.matmul(Y1, X1.T) / (N - 1)\n C_XX = np.matmul(X1, X1.T) / (N - 1)\n C_YY = np.matmul(Y1, Y1.T) / (N - 1)\n C_YY_history[i + 1] = np.trace(C_YY) / C_YY_init_trace\n C_XX_history[i + 1] = np.trace(C_XX) / C_XX_init_trace\n #print(f\"iteration {i}\")\n #print(f\" tr(C_YY) = {np.trace(C_YY)}\")\n #print(f\" tr(C_XX) = {np.trace(C_XX)}\")\n\n\n return (v, p, q, C_YY_history * 100, C_XX_history * 100)\n\n\ndef compress(X_raw, v, p, M_star):\n \"\"\"\n :param\n :return X_compressed\n \"\"\"\n assert(v.shape[0] == X_raw.shape[0])\n assert(v.shape[0] == 
p.shape[0])\n assert(v.shape[1] >= M_star)\n assert(p.shape[1] >= M_star)\n\n # make a copy\n X = copy.deepcopy(X_raw)\n\n # record the dimension\n (M, N) = X.shape\n\n # compress using the first M_star components\n X_compressed = np.zeros((M, N))\n for i in range(M_star):\n # M-by-1, the most dominant direction\n v0 = v[:, i].reshape(-1, 1)\n\n # M-by-1, corresponding input loading vector\n p0 = p[:, i].reshape(-1, 1)\n\n # 1-by-N, input score\n z0 = np.matmul(v0.T, X).reshape(1, -1)\n\n # M-by-N, contribution from this component\n x0 = np.matmul(p0, z0)\n\n # update the compressed value\n X_compressed += x0\n\n # deflate input\n X = X - np.matmul(p0, z0)\n\n return X_compressed\n\n\ndef predict(X_raw, v, p, q, M_star):\n \"\"\"\n :return Y_predicted\n \"\"\"\n assert(v.shape[0] == X_raw.shape[0])\n assert(v.shape[0] == p.shape[0])\n assert(v.shape[1] >= M_star)\n assert(p.shape[1] >= M_star)\n assert(q.shape[1] >= M_star)\n\n # make a copy\n X = copy.deepcopy(X_raw)\n\n # record the dimension\n (M, N) = X.shape\n (L, _) = q.shape\n\n # predict using the first M_star components\n Y_predicted = np.zeros((L, N))\n for i in range(M_star):\n # M-by-1, the most dominant direction\n v0 = v[:, i].reshape(-1, 1)\n\n # M-by-1, corresponding input loading vector\n p0 = p[:, i].reshape(-1, 1)\n\n # L-by-1, corresponding output loading vector\n q0 = q[:, i].reshape(-1, 1)\n\n # 1-by-N, input score\n z0 = np.matmul(v0.T, X).reshape(1, -1)\n\n # L-by-N, contribution from this component\n y0 = np.matmul(q0, z0)\n\n # update the prediction\n Y_predicted += y0\n\n # deflate input\n X = X - np.matmul(p0, z0)\n\n return Y_predicted\n\n\nif (__name__ == \"__main__\"):\n import matplotlib.pyplot as plt\n\n print(\"Loading sample input and output\")\n X = np.loadtxt(\"inputdata.txt\", skiprows=1, dtype=float)\n Y = np.loadtxt(\"outputdata.txt\", skiprows=1, dtype=float)\n\n # now each row is a sample input / output\n # we need to transpose X and Y so that each col is a sample input / output\n X = X.T\n Y = Y.T\n\n # NOTE: No normalization\n print(\"Running PLSR decomposition\")\n (v, p, q, C_YY_history, C_XX_history) = decompose(X, Y, 3)\n\n M_star = 1\n print(\"Running PLSR compression\")\n X_compressed = compress(X, v, p, M_star)\n X_error = X - X_compressed\n rmse = np.sqrt(np.mean(np.multiply(X_error, X_error), axis=1))\n print(f\"Compression RMSE = {rmse}\")\n\n print(\"Running PLSR prediction\")\n Y_predicted = predict(X, v, p, q, M_star)\n Y_error = Y - Y_predicted\n rmse = np.sqrt(np.mean(np.multiply(Y_error, Y_error), axis=1))\n print(f\"Prediction RMSE = {rmse}\")\n\n plt.figure()\n for i in range(3):\n plt.subplot(3, 1, i + 1)\n plt.plot(X[i, :])\n plt.plot(X_compressed[i, :])\n plt.ylabel(f\"X{i+1}\")\n plt.legend([\"raw\", \"compressed\"])\n plt.suptitle(f\"Raw and compressed input ({M_star} components)\")\n\n plt.figure()\n for i in range(2):\n plt.subplot(3, 1, i + 1)\n plt.plot(Y[i, :])\n plt.plot(Y_predicted[i, :])\n plt.ylabel(f\"Y{i+1}\")\n plt.legend([\"raw\", \"predicted\"])\n plt.suptitle(f\"Raw and predicted output ({M_star} components)\")\n\n plt.figure()\n plt.plot(C_XX_history, \"x-\")\n plt.plot(C_YY_history, \"o-\")\n plt.legend([\"input\", \"output\"])\n plt.xlabel(\"component ID\")\n plt.ylabel(\"residual var (%)\")\n plt.title(\"Residual variance history\")\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linalg.svd", "matplotlib.pyplot.title", "numpy.multiply", "numpy.matmul", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "numpy.zeros", "numpy.trace", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
alan-turing-institute/DSSG19-DNCP-PUBLIC
[ "1f8b037dd2fd79c9729ece970ba23a7b615acb74" ]
[ "notebooks/annajulia-objects-stats.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.7\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Objects stats and concentration of complaints\n\n# +\n# Import Scripts\n# %reload_ext autoreload\n# %autoreload 2\nimport sys\nsys.path.insert(0, '../src/utils')\nfrom IPython.display import display, HTML\nimport matplotlib.pyplot as plt\nimport utils \n\n# Packages\nimport pandas as pd\npd.options.display.max_columns = 999\n# -\n\ncon = utils.connect_to_database()\n\ntab_path = 'data_output'\nuser_name = 'annajulia'\n\n# - Concentration table of procurement type\n# - Concentration of agencies\n# - Top20 agencies\n# - Concentration of product categories\n# - Top20 products\n# - Concentration of items\n# - Top20 items\n# - Concentration of type of agencies\n# - Top20 type of agencies\n# - Concentration of municipalities\n# - Top20 municipalities\n\n# ## Tenders\n\nquery = \"\"\"select id_llamado, convocante, tipo_entidad, categoria, tipo_procedimiento, etapa_licitacion, \nfecha_publicacion_convocatoria, monto_estimado_convocatoria, monto_total_adjudicado, moneda, vigencia_contrato, \nbool_of_effective_complaints, bool_of_amendments, number_of_amendments, \nbool_of_amendments_0, number_of_amendments_0, bool_of_amendments_1, number_of_amendments_1,\nbool_of_amendments_2, number_of_amendments_2\nfrom raw_labeled.proceso\"\"\"\n\ntenders = pd.read_sql_query(query, con)\n\ntenders.head()\n\n# ### Basic checks\n\ncheck_fields = ['convocante', 'categoria', 'tipo_procedimiento', 'tipo_entidad']\n\n# Define number of top elements we're interested in\nn_top = 20\n\nfor field in check_fields:\n unique_list = tenders[field].unique()\n grouped_df = tenders.groupby([field])['id_llamado'].agg(['count']).rename(columns={'count': 'n_tenders'})\\\n .sort_values('n_tenders', ascending=False)\n grouped_df['perc_tenders'] = grouped_df['n_tenders'] / grouped_df['n_tenders'].sum() * 100\n \n print(f\"-----------------\\nNumber of unique {field}: {len(unique_list)}\")\n print(f\"Number of documents by {field}\")\n display(HTML(grouped_df.head(10).to_html()))\n\n# ### Amount of tenders by agency\n\n# Basic facts and figures about distribution of tenders among agencies.\n\n# +\nagency_distr = tenders.groupby(['convocante'])['id_llamado'].nunique().to_frame().reset_index()\\\n.sort_values('id_llamado', ascending=False).rename(columns={'id_llamado': 'n_tenders'})\n\nagency_distr['perc_tenders'] = agency_distr['n_tenders'] / agency_distr['n_tenders'].sum() * 100\n\nagency_distr.head(10)\n\n# +\nnrows = 1 # Number of rows\nncols = 1 # Number of columns\nfontsize = 14 # General fontsize\n\nplot_df = agency_distr.head(n_top)\nplot_df['convocante'] = plot_df['convocante'].str.slice(stop=30)\n\n# Create a subplot grid\n# ax is a list with the dimensions [nrows][ncols]\nfig, ax = plt.subplots(nrows=nrows, ncols=ncols,\n figsize=(10, 4)) # (width, heigth)\nax.barh(plot_df['convocante'], plot_df['perc_tenders'], color='orange')\nax.set_xlabel( '% tenders', \n fontsize=fontsize)\nax.set_ylabel('Public Agency', \n fontsize=fontsize)\nax.set_title(f'Top {n_top} Public Agencies in total amount of tenders', fontsize=fontsize)\n\n#path = utils.path_to_shared('anna', 'imgs', 'top_20_agencies_tenders', 'png')\n# fig.savefig(path)\n# -\n\naccum_col_names = ['Number of tenders without complaints',\n 'Number of tenders with complaints',\n 'Total number of tenders',\n 
'Complaint rate',\n 'Percentage of complaints (overall)',\n 'Cumulative percentage']\n\n# ### Accumulation of complaints in certain public agencies\n\n# Are complaints accumulated in certain agencies?\n\ngroupby_col = 'convocante'\nagency_compl = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', count_col='id_llamado')\n\n# Export table\n\n# +\nn = 10\ncol_names = [groupby_col.capitalize()] + accum_col_names\n\n#img_path = utils.path_to_shared(user_name, tab_path, f'top_{n}_agencies_concentration_complaints', 'csv')\n\noutput_df = agency_compl.head(n)\noutput_df.columns = col_names\n#output_df.to_csv(img_path, index=False)\noutput_df\n# -\n\n# **Insight**: 9 public agencies accumulate 25% of the total amount of complaints.\n\n# +\nplot_col = 'percentage'\n\nplot_df = agency_compl.sort_values(plot_col, ascending=False).head(n_top)\nplot_df[groupby_col] = plot_df[groupby_col].str.slice(stop=30)\n\nfig, ax = plt.subplots(nrows=nrows, ncols=ncols,\n figsize=(10, 4)) \nax.barh(plot_df[groupby_col], plot_df[plot_col])\nax.set_xlabel('% complaints', \n fontsize=fontsize)\nax.set_ylabel('Public Agency', \n fontsize=fontsize)\nax.set_title(f'Top {n_top} Public Agencies in {plot_col} of complaints', fontsize=fontsize)\n\n#path = utils.path_to_shared('anna', 'imgs', f'top_{n_top}_agencies_complaints', 'png')\n#fig.savefig(path)\n\n# +\nplot_col = 'n_bool_1'\n\nplot_df = agency_compl.sort_values(plot_col, ascending=False).head(n_top)\nplot_df[groupby_col] = plot_df[groupby_col].str.slice(stop=30)\n\nfig, ax = plt.subplots(nrows=nrows, ncols=ncols,\n figsize=(10, 4)) \nax.barh(plot_df[groupby_col], plot_df[plot_col])\nax.set_xlabel( 'Percentage of tenders with complaints', \n fontsize=fontsize)\nax.set_ylabel('Public agency', \n fontsize=fontsize)\nax.set_title(f'Top {n_top} public agencies in total of complaints', fontsize=fontsize)\n\n#path = utils.path_to_shared('anna', 'imgs', f'top_{n_top}_agencies_total_complaints', 'png')\n#fig.savefig(path)\n# -\n\n# ## Accumulation of complaints in certain tender categories\n\ngroupby_col = 'categoria'\ncategory_compl = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', \n count_col='id_llamado')\ncategory_compl.head()\n\n# +\nn = 10\ncol_names = [groupby_col.capitalize()] + accum_col_names\n\n#img_path = utils.path_to_shared(user_name, tab_path, f'top_{n}_tender_categories_concentration_complaints', 'csv')\n\noutput_df = category_compl.head(n)\noutput_df.columns = col_names\n#output_df.to_csv(img_path, index=False)\noutput_df\n\n# +\nplot_col = 'percentage'\n\nplot_df = category_compl.sort_values(plot_col, ascending=False).head(20)\nplot_df[groupby_col] = plot_df[groupby_col].str.slice(stop=30)\n\nfig, ax = plt.subplots(nrows=nrows, ncols=ncols,\n figsize=(10, 4)) \nax.barh(plot_df['categoria'], plot_df[plot_col])\nax.set_xlabel( 'Percentage of tenders with complaints', \n fontsize=fontsize)\nax.set_ylabel('Tender category', \n fontsize=fontsize)\nax.set_title('Top 20 tender categories in percentage of tender with complaints', fontsize=fontsize)\n\n#path = utils.path_to_shared('anna', 'imgs', f'top_{n_top}_categories_complaints', 'png')\n#fig.savefig(path)\n# -\n\n# ## Complaints by type of process\n\ngroupby_col = 'tipo_procedimiento'\ntype_tender = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', count_col='id_llamado')\ntype_tender.head()\n\n# +\nn = 10\ncol_names = 
[groupby_col.capitalize().replace('_', ' ')] + accum_col_names\n\noutput_df = type_tender.head(n)\noutput_df.columns = col_names\n#img_path = utils.path_to_shared(user_name, tab_path, f'top_{n}_tender_process_type_concentration_complaints', 'csv')\n#output_df.to_csv(img_path, index=False)\noutput_df\n# -\n\n# More than 50% of complaints refer to Licitacion Publica.\n\n# ### Accumulation of complaints by type of agency\n\ngroupby_col = 'tipo_entidad'\nagency_type = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', count_col='id_llamado')\n\n# +\ncol_names = [groupby_col.capitalize().replace('_', ' ')] + accum_col_names\n\noutput_df = agency_type\noutput_df.columns = col_names\n#img_path = utils.path_to_shared(user_name, tab_path, f'agency_type_concentration_complaints', 'csv')\n#output_df.to_csv(img_path, index=False)\noutput_df\n# -\n\n# ### Municipalities\n\ngroupby_col = 'convocante'\nmunicipality_tenders = tenders.query(\"tipo_entidad == 'Municipalidades'\")\nmunic_df = utils.calculate_bool_stats(df=municipality_tenders, groupby_col=groupby_col,\n bool_col='bool_of_effective_complaints', count_col='id_llamado')\n\n# +\nn = 10\ncol_names = [groupby_col.capitalize().replace('_', ' ')] + accum_col_names\n\noutput_df = munic_df.head(n)\noutput_df.columns = col_names\n#img_path = utils.path_to_shared(user_name, tab_path, f'top_{n}_municipalities_concentration_complaints', 'csv')\n#output_df.to_csv(img_path, index=False)\noutput_df\n# -\n\nyears = range(2009, 2020, 1)\nfor year in years:\n limit_date = f'{year}-01-01'\n aux_df = tenders.query(f\"fecha_publicacion_convocatoria > '{limit_date}'\")\n n_types = aux_df.tipo_procedimiento.nunique()\n print(f'For year {year}, number of unique types of procurement processes: {n_types}')\n\n# ## Amendments by public agency\n\nbool_col = 'bool_of_amendments'\ncount_col = 'id_llamado'\n\ngroupby_col = 'convocante'\nstats_df = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col=bool_col, \n count_col=count_col)\nstats_df.head(n_top)\n\n# ## By category of process\n\ngroupby_col = 'categoria'\nstats_df = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col=bool_col, \n count_col=count_col)\ntop_categories_0 = stats_df[groupby_col][:5]\nstats_df.head(5)\n\n# The first 5 categories of processes accumulate 50% of total amendments.\n\ngroupby_col = 'categoria'\nstats_df = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_amendments_1', \n count_col=count_col)\ntop_categories_1 = stats_df[groupby_col][:5]\nstats_df.head(5)\n\n# +\ngroupby_col = 'categoria'\nstats_df = utils.calculate_bool_stats(df=tenders, groupby_col=groupby_col, \n bool_col='bool_of_amendments_2', \n count_col=count_col)\ntop_categories_2 = stats_df[groupby_col][:5]\n\nstats_df.head(5)\n# -\n\nlen(set(top_categories_0.tolist() + top_categories_1.tolist() + top_categories_2.tolist()))\n\n# ## Items\n\nquery = \"\"\"select id_llamado, producto_nombre_catalogo, precio_unitario_estimado, cantidad, unidad_medida, presentacion,\nfecha_publicacion, bool_of_effective_complaints, bool_of_amendments, bool_of_amendments_0, bool_of_amendments_1,\nbool_of_amendments_2\nfrom raw_labeled.item_solicitado\"\"\"\n\nitems_df = pd.read_sql_query(query, con)\n\nitems_df.head()\n\nprint(f\"Number of unique products: {items_df.producto_nombre_catalogo.nunique()}\")\n\ncount_col = 'id_llamado'\ngroupby_col = 'producto_nombre_catalogo'\nstats_df = 
utils.calculate_bool_stats(df=items_df, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', \n count_col=count_col)\nstats_df.producto_nombre_catalogo.nunique()\n\n# +\nn = 10\ncol_names = [groupby_col.capitalize().replace('_', ' ')] + accum_col_names\n\noutput_df = stats_df.head(n)\noutput_df.columns = col_names\n#img_path = utils.path_to_shared(user_name, tab_path, f'top_{n}_product_concentration_complaints', 'csv')\n#output_df.to_csv(img_path, index=False)\noutput_df\n# -\n\nstats_df.head(10)\n\ngroupby_col = 'presentacion'\nstats_df = utils.calculate_bool_stats(df=items_df, groupby_col=groupby_col, \n bool_col='bool_of_effective_complaints', \n count_col=count_col)\n\nstats_df.head(10)\n\n\n" ]
[ [ "pandas.read_sql_query", "matplotlib.pyplot.subplots" ] ]
alexlib/partracking3D
[ "e3bb7aa48d20de8bb02a2f3549f07f3a411249f4" ]
[ "STMPython/STMFunctions.py" ]
[ "# 2017-04-12 included [0,0,0] in the default neighbours should not be too critical, just a backup measure.\r\nimport sys\r\nimport math\r\nimport collections as col\r\nimport numpy as np\r\nimport itertools as it\r\nimport copy\r\nimport datetime\r\n#import scipy.spatial as sps\r\n\r\n# Take a position p and a bunch of neighbours, to create p+n for each n in neighbours\r\ndef expandneighbours(p,neighbours):\r\n return(list(list(map(lambda x,y: x + y, p, k)) for k in neighbours))\r\n\r\n# Take positions p and a bunch of neighbours, to create p+n for each n in neighbours and for each p\r\ndef expandallneighbours(ps,neighbours):\r\n return(joinlists(list(expandneighbours(p,neighbours) for p in ps)))\r\n\r\n# 'Flatten' a list of lists to a single list\r\ndef joinlists(lst):\r\n return(list([value for sublst in lst for value in sublst]))\r\n\r\n# Custom sign function that gives either -1 or 1, also for input value 0\r\ndef Sgn(x):\r\n if (x<0):\r\n return(-1)\r\n else:\r\n return(1)\r\n\r\n# Division that spawns -infinity in case the denominator is 0\r\ndef SpecialDivision(a,b):\r\n if(b==0):\r\n return(-math.inf)\r\n else:\r\n return(a/b)\r\n\r\n# Mathematica style map with level specification\r\ndef maplevel(f, item, level):\r\n if level == 0:\r\n return f(item)\r\n else:\r\n return [maplevel(f, i, level - 1) for i in item]\r\n\r\n# Return unique elements of a list, works without second argument for simple elements. Lists-like elements require a lambda function lambda x:tuple(x)\r\ndef uniqify(seq, idfun=None):\r\n if idfun is None:\r\n def idfun(x): return x\r\n seen = {}\r\n result = []\r\n for item in seq:\r\n marker = idfun(item)\r\n if marker in seen: continue\r\n seen[marker] = 1\r\n result.append(item)\r\n return result\r\n\r\n# Gives index in which 'bin' the value n will fall with boundaries 'lst', -1 = outside\r\n# lst has to be sorted to make this work properly\r\n# Implemented with O(Log(N)) scaling, basic idea is to reduce by factors of 2 every time until 1 bin is left\r\n# Greedy: it will take the first bin if the right boundary exactly matches n\r\ndef PositionSorted(boundaries,n):\r\n lst = list(boundaries)\r\n lst[0] -= 10**-8\r\n lst[-1] += 10**-8\r\n mn = 0\r\n mx = len(lst)-1\r\n if (lst[mn] <= n <= lst[mx]):\r\n while (mx - mn > 1):\r\n #print('min',mn,'max',mx)\r\n trial = round((mn+mx)/2) # Banker's rounding but does not matter, still O(Log(N)) scaling\r\n if (n > lst[trial]):\r\n mn = trial\r\n else:\r\n mx = trial\r\n #print('Final: min',mn,'max',mx) \r\n return(mn)\r\n else:\r\n return(-1)\r\n\r\n# Euclidean distance of a list (faster than np.linalg.norm(y)!)\r\ndef VectorNorm(y):\r\n x = np.array(y)\r\n return(np.sqrt(x.dot(x)))\r\n\r\ndef SquareVectorNorm(y):\r\n x = np.array(y)\r\n return(x.dot(x))\r\n\r\ndef normalize(v):\r\n norm = VectorNorm(v)\r\n if norm==0:\r\n return(v)\r\n return(v/norm)\r\n\r\ndef ClosestPointToLines2(p,v):\r\n p1 = np.array(p[0])\r\n p2 = np.array(p[1])\r\n v1 = np.array(v[0])\r\n v2 = np.array(v[1])\r\n #a = np.dot(v1,v1) # Assuming v is normalized => length 1 \r\n b = 2*np.dot(p1-p2,v1)\r\n c = 2*np.dot(v1,v2)\r\n d = 2*np.dot(p2-p1,v2)\r\n #e = np.dot(v2,v2) # Assuming v is normalized => length 1 \r\n f = np.dot(p1,p1) + np.dot(p2,p2)\r\n #s = (2*a*d + b*c)/(c**2-4*a*e)\r\n s = (2*d + b*c)/(c**2-4) # Assuming v is normalized => a=e=1\r\n #t = (c*s - b)/(2*a)\r\n t = (c*s - b)/2 # Assuming v is normalized => a=e=1\r\n sol = (p1 + t*v1 + p2 + s*v2)/2\r\n #d1 = VectorNorm(np.cross(v1,p1-sol))/np.sqrt(a) # Assuming v is normalized => 
a=1\r\n d1 = VectorNorm(np.cross(v1,p1-sol))\r\n #d2 = np.linalg.norm(np.cross(sol-p1,sol-p1+v1))/np.linalg.norm(v1) # Must be the same as d1 for two lines!\r\n return([sol.tolist(),d1.item()])\r\n\r\ndef ClosestPointToLines(p,v):\r\n if(len(p)==2):\r\n return(ClosestPointToLines2(p,v))\r\n else:\r\n a = np.array(p)\r\n #d = np.array(list(map(normalize,v)))\r\n d = np.array(v) # Assuming v is normalized already\r\n length = len(p)\r\n rhs = np.array([0.0,0.0,0.0])\r\n lhs = length*np.identity(3)\r\n for i in range(length):\r\n rhs += a[i]-d[i]*np.dot(a[i],d[i])\r\n lhs -= np.outer(d[i],d[i])\r\n sol = np.linalg.solve(lhs, rhs)\r\n dists = list(map(lambda a,d: SquareVectorNorm(np.cross(sol - a, d)), a, d))\r\n dists = VectorNorm(dists)*np.sqrt(1/len(p))\r\n dists = dists.item() # Convert to regular float\r\n return([sol.tolist(),dists])\r\n\r\n# 3D dimensional voxel traversal, gives cell-indices back starting from point p and moving in v direction, subject two cell-bounds bounds\r\ndef DirectionalVoxelTraversal2(p,v,bounds,logfile=''):\r\n if len(p) == len(v) == len(bounds) == 3: # p, v, and bounds should be 3 dimensional\r\n curindex = list(map(PositionSorted,bounds,p))\r\n if (-1 in curindex or VectorNorm(v)==0):\r\n if(logfile != ''):\r\n flog = open(logfile, 'a')\r\n flog.write(\"ray starts outside bounds!\\np:\" + str(p) + \"\\nv:\" + str(v) + \"\\ncell index:\" + str(curindex) + \"\\n\")\r\n flog.close()\r\n print(\"ray starts outside bounds!\")\r\n print(\"p:\",p)\r\n print(\"v:\",v)\r\n print(\"cell index:\",curindex)\r\n return([])\r\n else:\r\n direction = list(map(Sgn,list(v)))\r\n relbounds = list(map(lambda a,b:list([i-b for i in a]),list(bounds),list(p)))\r\n times = list(map(lambda a,b:list([SpecialDivision(i,b) for i in a]),relbounds,v))\r\n times = list(map(lambda a,b:list([[i,b,False] for i in a]),times,[0,1,2]))\r\n for i in range(3):\r\n if(direction[i]==1):\r\n times[i][-1][2] = True \r\n else:\r\n times[i][0][2] = True \r\n times = joinlists(times)\r\n times = list(filter(lambda x: x[0]>0,times)) # Should be > !\r\n times = list(sorted(times,key=lambda x: x[0]))\r\n times = list(it.takewhile(lambda x: x[2]==False,times))\r\n times = list([x[1] for x in times])\r\n out = [copy.copy(curindex)]\r\n for index in times:\r\n new = list(out[-1]);\r\n new[index] += direction[index]\r\n out.append(list(new)) \r\n return(out)\r\n else:\r\n print(\"dimension mismatch!\")\r\n if(logfile != ''):\r\n flog = open(logfile, 'a')\r\n flog.write(\"dimension mismatch!\\n\")\r\n flog.close()\r\n return([])\r\n\r\n# 3D dimensional voxel traversal, gives cell-indices back starting from point p and moving in v direction, subject two cell-bounds bounds\r\ndef DirectionalVoxelTraversal(p,v,bounds,logfile=''):\r\n if len(p) == len(v) == len(bounds) == 3: # p, v, and bounds should be 3 dimensional\r\n curpos = list(p)\r\n cellindex = list(map(PositionSorted,bounds,p))\r\n if (-1 in cellindex or VectorNorm(v)==0):\r\n if(logfile != ''):\r\n flog = open(logfile, 'a')\r\n flog.write(\"ray starts outside bounds!\\np:\" + str(p) + \"\\nv:\" + str(v) + \"\\ncell index:\" + str(cellindex) + \"\\n\")\r\n flog.close()\r\n print(\"ray starts outside bounds!\")\r\n print(\"p:\",p)\r\n print(\"v:\",v)\r\n print(\"cell index:\",cellindex)\r\n return([])\r\n else:\r\n sgns = list(map(Sgn,v))\r\n sgnspart = list(map(lambda x: 1 if x == -1 else 0, sgns))\r\n \r\n cont = True\r\n steps = 0;\r\n out = [copy.copy(cellindex)]\r\n while(cont and steps < 10000): # Steps is just a safety thing for now, can be removed 
later\r\n steps += 1\r\n newindices = list(map(lambda x,y: x+y, cellindex, sgns))\r\n newbounds = [0,0,0]\r\n for i in range(3):\r\n if (0 <= newindices[i] <= len(bounds[i])-1): # Should there be + sgnspart[i] in the middle term?\r\n newbounds[i] = bounds[i][newindices[i] + sgnspart[i]]\r\n else:\r\n newbounds[i] = math.inf # Bounds are at infinity, so the 'next' bounds are infinitely far away\r\n \r\n if (math.inf not in newbounds):\r\n ts = [0,0,0]\r\n for i in range(3):\r\n if (v[i]==0):\r\n ts[i] = math.inf # It will take infinite amount of time\r\n else:\r\n ts[i] = (newbounds[i]-curpos[i])/v[i]\r\n \r\n order = sorted(range(len(ts)), key=lambda k: ts[k]) # Find the 'times' needed to the next boundaries in each dimensions\r\n minpos = order[0] # Find the dimension for which the time is shortest\r\n \r\n cellindex[minpos] += sgns[minpos]\r\n ts = ts[minpos]\r\n for i in range(3):\r\n curpos[i] += v[i]*ts\r\n \r\n out.append(copy.copy(cellindex))\r\n else:\r\n #print(\"at edge\")\r\n cont = False\r\n return(out)\r\n else:\r\n print(\"dimension mismatch!\")\r\n if(logfile != ''):\r\n flog = open(logfile, 'a')\r\n flog.write(\"dimension mismatch!\\n\")\r\n flog.close()\r\n return([])\r\n\r\ndef AtFace(bmin, bmax, hitb):\r\n return(bmin <= hitb <= bmax)\r\n\r\n# Projects ray (defined by p,v) on to an AABB (axis aligned bounding box).\r\n# [boolhit, boolinside, pnew, v] boolhit tells if it hits AABB, abd boolinside tells if it is projected, and pnew the new position or [] in case it misses.\r\n# Note that ray can be projected on to an AABB with negative 'time'...\r\ndef PrepareRay(p,v,bounds):\r\n xmin = bounds[0][0]\r\n xmax = bounds[0][1]\r\n ymin = bounds[1][0]\r\n ymax = bounds[1][1]\r\n zmin = bounds[2][0]\r\n zmax = bounds[2][1]\r\n x = p[0]\r\n y = p[1]\r\n z = p[2]\r\n newv = normalize(v).tolist() # v is normalized\r\n vx, vy, vz = newv\r\n if(xmin < x < xmax and ymin < y < ymax and zmin < z < zmax):\r\n return([True, True, p, newv]) # Return False and original point\r\n else:\r\n t = list(map(SpecialDivision,[xmin - x, xmax - x, ymin - y, ymax - y, zmin - z, zmax - z],[vx, vx, vy, vy, vz, vz]))\r\n ip = [0 for tmp in range(6)]\r\n for i in range(6):\r\n ti = t[i]\r\n if(abs(ti) == math.inf):\r\n ip[i] = [math.inf, math.inf, math.inf]\r\n else:\r\n ip[i] = [x + vx*ti, y + vy*ti, z + vz*ti]\r\n\r\n atfacex1 = AtFace(ymin, ymax, ip[0][1]) and AtFace(zmin, zmax, ip[0][2])\r\n atfacex2 = AtFace(ymin, ymax, ip[1][1]) and AtFace(zmin, zmax, ip[1][2])\r\n atfacey1 = AtFace(xmin, xmax, ip[2][0]) and AtFace(zmin, zmax, ip[2][2])\r\n atfacey2 = AtFace(xmin, xmax, ip[3][0]) and AtFace(zmin, zmax, ip[3][2])\r\n atfacez1 = AtFace(xmin, xmax, ip[4][0]) and AtFace(ymin, ymax, ip[4][1])\r\n atfacez2 = AtFace(xmin, xmax, ip[5][0]) and AtFace(ymin, ymax, ip[5][1])\r\n data = [t, [atfacex1, atfacex2, atfacey1, atfacey2, atfacez1, atfacez2], ip]\r\n data = list(zip(*data)) # Data will be a list of lists, each having the form: time-till-hit, hits face (boolean), position it hits a plane\r\n data = list(filter(lambda xx: xx[1]==True, data)) # Select only those that hit a face #don't change to 'is True' numpy bool possibility\r\n if(len(data)>0):\r\n data = sorted(data, key = lambda x: x[0]) # Sort by arrival time (time till hit)\r\n return([True, False, data[0][2], newv]) # Position it hits the plane of first-hit\r\n else:\r\n return([False, False, [], newv])\r\n \r\n \r\n \r\ndef SpaceTraversalMatching(raydata, boundingbox, nx = 75, ny = 75, nz = 75, cammatchfunc = lambda x: len(x)>2, maxmatchesperray = 
2, maxdistance = 999.9, neighbours = 6, logfile = ''):\r\n\r\n if(len(boundingbox)==3 and len(boundingbox[0]) == 2 and len(boundingbox[1]) == 2 and len(boundingbox[2]) == 2 and nx >= 5 and ny >= 5 and nz >= 5):\r\n \r\n if(neighbours == 0):\r\n neifhbours = [[0,0,0]]\r\n elif(neighbours == 6):\r\n neighbours = [[-1,0,0],[0,-1,0],[0,0,-1],[0,0,1],[0,1,0],[1,0,0],[0,0,0]]\r\n elif(neighbours == 18):\r\n neighbours = [[-1,-1,0],[-1,0,-1],[-1,0,0],[-1,0,1],[-1,1,0],[0,-1,-1],[0,-1,0],[0,-1,1],[0,0,-1],[0,0,1],[0,1,-1],[0,1,0],[0,1,1],[1,-1,0],[1,0,-1],[1,0,0],[1,0,1],[1,1,0],[0,0,0]]\r\n elif(neighbours == 26):\r\n neighbours = [[-1,-1,-1],[-1,-1,0],[-1,-1,1],[-1,0,-1],[-1,0,0],[-1,0,1],[-1,1,-1],[-1,1,0],[-1,1,1],[0,-1,-1],[0,-1,0],[0,-1,1],[0,0,-1],[0,0,1],[0,1,-1],[0,1,0],[0,1,1],[1,-1,-1],[1,-1,0],[1,-1,1],[1,0,-1],[1,0,0],[1,0,1],[1,1,-1],[1,1,0],[1,1,1],[0,0,0]]\r\n if(type(neighbours) == list):\r\n if(logfile != ''):\r\n def LOGprint(*out):\r\n flog = open(logfile, 'a')\r\n flog.write(\" \".join(map(str,list(out))) + \"\\n\")\r\n flog.close()\r\n else:\r\n def LOGprint(*out):\r\n print(*out)\r\n\r\n LOGprint(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\r\n bounds = list(map(lambda x, n: list([x[0]+i*(x[1]-x[0])/n for i in range(n+1)]),boundingbox,[nx,ny,nz]))\r\n for i in range(3):\r\n bounds[i][-1] = boundingbox[i][-1]\r\n \r\n LOGprint(\"# of cells:\",nx*ny*nz)\r\n rays = list(raydata)\r\n\r\n # Prepare the rays so that they are inside the box or on the side.\r\n cammarkerfunc = lambda x: x[0] # First element is camera ID\r\n raymarkerfunc = lambda x: x[1] # Second element is the ray ID\r\n raydb = {} # Store a dictionary of (cameraid, rayid): [pos, direction]\r\n validrays = [] # Store the transformed rays \r\n numrays = col.Counter() # Store the number of rays per camera\r\n invalidcounter = col.Counter() # Store the number that are invalid\r\n for r in rays:\r\n camid = cammarkerfunc(r)\r\n rayid = raymarkerfunc(r)\r\n numrays[camid] += 1\r\n\r\n pp = list(r[2:5])\r\n vv = list(r[5:8])\r\n out = PrepareRay(pp, vv, boundingbox)\r\n\r\n if(out[0]): # If it does not miss (hit or inside)\r\n raydb[(camid, rayid)] = [out[2], out[3]] # Tuple of cam-ray ids\r\n validrays.append([camid, rayid, out[1], out[2], out[3]]) # CamID, rayID, bool inside, pnew, v \r\n else:\r\n #raydb[(camid, rayid)] = [copy.copy(pp), copy.copy(vv),\"not used\"] # Store in raydb even if it misses the bounding box, mark them\r\n invalidcounter[camid] += 1\r\n\r\n #for k, v in raydb.items():\r\n # print(VectorNorm(v[1]))\r\n \r\n \r\n LOGprint(\"# of rays for each camera:\", dict(numrays),\"\\n# of rays that miss the bounding box:\", dict(invalidcounter))\r\n if(len(dict(numrays))>10):\r\n LOGprint(\"# of cameras is large:\", len(dict(numrays)) ,\" Double check the input!\")\r\n return([])\r\n \r\n\r\n traversed = []\r\n for ray in validrays:\r\n if(ray[2]): # Ray is inside, traverse both forward and backward\r\n out = DirectionalVoxelTraversal2(list(ray[3]),list(ray[4]),bounds,logfile) + \\\r\n DirectionalVoxelTraversal2(list(ray[3]),list(map(lambda x: -1*x, ray[4])),bounds,logfile)\r\n else: # Ray is at edge, traverse in forward direction only\r\n out = DirectionalVoxelTraversal2(list(ray[3]),list(ray[4]),bounds,logfile)\r\n\r\n out = uniqify(expandallneighbours(out,neighbours),lambda x: tuple(x)) # Expand in neighbourhood and remove duplicates\r\n ext = [[ray[0],ray[1],o] for o in out] # Creates long list of camid, rayid, cellindex\r\n traversed.extend(ext) # Pile up these into traversed\r\n\r\n 
LOGprint(\"# of voxels traversed after expansion:\", len(traversed))\r\n cellfunc = lambda x: x[2]\r\n traversed = list(sorted(traversed,key=cellfunc)) # Sort based on cell. Sort is needed before groupby\r\n traversed = [list(g) for k, g in it.groupby(traversed, cellfunc)] # Group elements by same cell\r\n LOGprint(\"Sorted and grouped by cell index. # of groups:\",len(traversed))\r\n traversed = list(filter(cammatchfunc,traversed)) # Prune based on number of rays (fast rough filter, cam filter later)\r\n LOGprint(\"Rough pruned based on number of cameras:\",len(traversed))\r\n traversed = maplevel(lambda x:[x[0],x[1]], traversed, 2) # Remove cellindex, not needed anymore, leave [camid, rayid]\r\n traversed = list(map(lambda x:[list(g) for k, g in it.groupby(x, cammarkerfunc)],traversed))\r\n #LOGprint(\"Cell index removed and grouped by camera for each cell\")\r\n traversed = list(filter(cammatchfunc,traversed)) # Prune based on number of different cameras\r\n LOGprint(\"Pruned based on number of cameras:\",len(traversed))\r\n candidates = copy.copy(list(map(lambda x:list(list(tup) for tup in it.product(*x)),traversed))) # All combinations between all cameras\r\n candidates = joinlists(candidates) # Flatten a list of lists to a single list\r\n LOGprint(\"Flattened list of candidates:\",len(candidates))\r\n candidates = uniqify(candidates,lambda x: tuple(list(joinlists(x)))) # Delete duplicates, flattened list as tag\r\n LOGprint(\"Duplicate candidates removed:\",len(candidates))\r\n candidates = sorted(candidates)\r\n\r\n \r\n #LOGprint(\"Computing match position and quality of candidates...\")\r\n newcandidates = []\r\n for c in candidates:\r\n pvdata = list([raydb[tuple(x)] for x in c])\r\n pdata = list([x[0] for x in pvdata])\r\n vdata = list([x[1] for x in pvdata]) \r\n out = ClosestPointToLines(pdata,vdata)\r\n newcandidates.append([c,list(out[0]),out[1]])\r\n \r\n #hullpts = [[20.0,5.0,165.0],[20.0,10.0,160.0],[20.0,10.0,165.0],[20.0,15.0,160.0],[20.0,15.0,165.0],[20.0,20.0,160.0],[20.0,20.0,165.0],[20.0,25.0,160.0],[20.0,25.0,165.0],[25.0,0.0,165.0],[25.0,0.0,170.0],[25.0,5.0,155.0],[25.0,10.0,155.0],[25.0,15.0,155.0],[25.0,20.0,155.0],[25.0,25.0,155.0],[25.0,25.0,175.0],[25.0,30.0,170.0],[25.0,35.0,155.0],[25.0,35.0,160.0],[30.0,-5.0,170.0],[30.0,0.0,160.0],[30.0,5.0,150.0],[30.0,10.0,150.0],[30.0,15.0,150.0],[30.0,20.0,150.0],[30.0,25.0,150.0],[30.0,25.0,180.0],[30.0,30.0,150.0],[30.0,30.0,180.0],[30.0,35.0,175.0],[30.0,40.0,165.0],[30.0,45.0,155.0],[35.0,-10.0,175.0],[35.0,-5.0,165.0],[35.0,-5.0,180.0],[35.0,0.0,155.0],[35.0,10.0,145.0],[35.0,15.0,145.0],[35.0,20.0,145.0],[35.0,25.0,185.0],[35.0,30.0,185.0],[35.0,35.0,150.0],[35.0,35.0,185.0],[35.0,40.0,180.0],[35.0,45.0,155.0],[35.0,45.0,170.0],[35.0,50.0,160.0],[40.0,-10.0,175.0],[40.0,-10.0,180.0],[40.0,-5.0,165.0],[40.0,0.0,155.0],[40.0,5.0,145.0],[40.0,10.0,145.0],[40.0,15.0,145.0],[40.0,30.0,150.0],[40.0,40.0,180.0],[40.0,45.0,155.0],[40.0,45.0,175.0],[40.0,50.0,160.0],[40.0,50.0,165.0],[45.0,-5.0,175.0],[45.0,0.0,165.0],[45.0,5.0,155.0],[45.0,10.0,150.0],[45.0,15.0,150.0],[45.0,40.0,180.0],[45.0,50.0,160.0],[45.0,50.0,165.0],[50.0,15.0,160.0],[50.0,20.0,160.0],[50.0,25.0,175.0],[50.0,30.0,175.0],[50.0,35.0,175.0],[50.0,45.0,165.0],[55.0,15.0,170.0],[55.0,20.0,170.0],[55.0,25.0,170.0],[55.0,30.0,170.0],[55.0,35.0,170.0],[55.0,40.0,170.0]];\r\n #delaun = sps.Delaunay(hullpts) # Define the Delaunay triangulation \r\n #inq = delaun.find_simplex([x[1] for x in newcandidates])>0\r\n #inq = inq.tolist();\r\n #print(inq)\r\n 
#print(type(inq))\r\n #newcandidates = list(map(lambda x,y: x + [y],newcandidates,inq))\r\n\r\n candidates = copy.copy(newcandidates);\r\n del newcandidates\r\n #LOGprint(\"Sorting candidate matches by quality of match...\")\r\n candidates = sorted(candidates,key=lambda x: (-len(x[0]), x[2])) #sort by number of cameras then by error\r\n #LOGprint(\"Here are upto 9999 of the best matches:\")\r\n #LOGprint(\"Index, [camid rayid ....] Position, Mean square distance\")\r\n #print(\"num candidates:\",len(candidates))\r\n #for i in range(1,len(candidates),100):\r\n # print(i,candidates[i])\r\n\r\n LOGprint(\"Selecting the best matches with upto\",maxmatchesperray,\"match(es)/ray out of\",len(candidates),\"candidates\")\r\n #now we want to pick the best matches first and match each ray at most maxmatchesperray\r\n approvedmatches = [] # Store approved candidates\r\n matchcounter = col.Counter() # Keep track of how many they are matched\r\n for cand in candidates:\r\n if(cand[2] < maxdistance):\r\n valid = True;\r\n for idpair in cand[0]:\r\n if(matchcounter[tuple(idpair)]>=maxmatchesperray):\r\n valid = False\r\n #print(idpair,\"has been matched already\",maxmatchesperray,\"time(s)\")\r\n break\r\n if(valid==True):\r\n for idpair in cand[0]:\r\n matchcounter[tuple(idpair)] += 1\r\n\r\n approvedmatches.append(list(cand))\r\n\r\n LOGprint(\"Selecting done.\",len(approvedmatches),\"matched found (out of\",len(candidates),\"candidates)\")\r\n #print(\"Here are the approved matches:\")\r\n #print(\"Index, [camid rayid ....] Position, Mean square distance\") \r\n \r\n return(approvedmatches)\r\n else:\r\n print(\"Neighbours should be a 0, 6, 18, or 26 or a list of triplets:\",neighbours) \r\n return([])\r\n else:\r\n print(\"Something went wrong:\")\r\n print(\"Bounding box should be of the form [[xmin,xmax],[ymin,ymax],[zmin,zmax]]\",boundingbox)\r\n print(\"nx,ny,nz should be >=5 otherwise you will do a lot of comparisons!\",[nx,ny,nz]) \r\n return([])\r\n \r\n" ]
[ [ "numpy.dot", "numpy.linalg.solve", "numpy.identity", "numpy.cross", "numpy.outer", "numpy.array" ] ]
manvhah/pyksvd
[ "2b17a2892c0cdcc7a53b7ddbadc764b0af7a8dd9" ]
[ "setup.py" ]
[ "\n#!/usr/bin/env python\n\n################################################################################\n# All the control parameters should go here\n\nsource_directory_list = ['ksvd']\ncompiler_args = []\nlink_args = []\nversion = \"0.01\"\ndescription=\"Implementation of the K-SVD algorithm.\" \nauthor = \"Hoyt Koepke\"\nauthor_email=\"[email protected]\"\nname = 'pyksvd'\nscripts = []\nurl = \"http://www.stat.washington.edu/~hoytak/code/pyksvd/\"\ndownload_url = \" \"\n\nlong_description = \\\n\"\"\"\n\"\"\"\n\nclassifiers = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Cython',\n 'Programming Language :: C++',\n 'Topic :: Scientific/Engineering',\n ]\n\n# Stuff for extension module stuff\nextra_library_dirs = []\nextra_include_dirs = []\n\n# Need to find cplex together \n\nlibrary_includes = ['gomp']\nspecific_libraries = {}\n\ncompiler_args += ['-march=native', '-fopenmp',\n '--std=c++0x', '-O2',\n '-funroll-loops',\n # '-DNO_UNROLL_BATCHOMP_SOLVER',\n '-ffast-math',\n '-funsafe-math-optimizations',\n '-ffinite-math-only'\n ]\n\n################################################################################\n# Shouldn't have to adjust anything below this line...\n\nfrom glob import glob\nimport os\nfrom os.path import split, join\nfrom itertools import chain, product\nimport sys\n\nimport numpy\nextra_include_dirs += [numpy.get_include()]\n\nfrom distutils.core import setup\nfrom distutils.extension import Extension\n\n######################################################\n# First have to see if we're authorized to use cython files, or if we\n# should instead compile the included files\n\ncython_mode = True\n\nif \"--debug\" in sys.argv:\n debug_mode_c_code = True\n del sys.argv[sys.argv.index(\"--debug\")]\nelse:\n debug_mode_c_code = False\n\n# Get all the cython files in the sub directories and in this directory\nif cython_mode:\n cython_files = dict( (d, glob(join(d, \"*.pyx\"))) for d in source_directory_list + ['.'])\nelse:\n cython_files = {}\n\nall_cython_files = set(chain(*cython_files.values()))\n\nprint(\"+++++++++++++++++++\")\n\nif cython_mode:\n print(\"Cython Files Found: \\n%s\\n+++++++++++++++++++++\" % \",\\\n \".join(sorted(all_cython_files)))\nelse:\n print(\"Cython support disabled; compiling extensions from pregenerated C\\\n sources.\")\n print(\"To enable cython, run setup.py with the option --cython.\")\n print(\"+++++++++++++++++++\")\n\n# Set the compiler arguments -- Add in the environment path stuff\nld_library_path = os.getenv(\"LD_LIBRARY_PATH\")\n\nif ld_library_path is not None:\n lib_paths = ld_library_path.split(\":\")\nelse:\n lib_paths = []\n\ninclude_path = os.getenv(\"INCLUDE_PATH\")\nif include_path is not None:\n include_paths = [p.strip() for p in include_path.split(\":\") if len(p.strip()) > 0]\nelse:\n include_paths = []\n\n\n# get all the c files that are not cythonized .pyx files.\nc_files = dict( (d, [f for f in glob(join(d, \"*.c\"))\n if (f[:-2] + '.pyx') not in all_cython_files])\n for d in source_directory_list + ['.'])\n\nfor d, l in chain(((d, glob(join(d, \"*.cxx\"))) for d in source_directory_list + ['.']),\n ((d, glob(join(d, \"*.cpp\"))) for d in source_directory_list + ['.'])):\n c_files[d] += l\n\n\nprint(\"C Extension Files Found: \\n%s\\n+++++++++++++++++++++\" % \",\\\n 
\".join(sorted(chain(*c_files.values()))))\n\n# Collect all the python modules\ndef get_python_modules(f):\n d, m = split(f[:f.rfind('.')])\n return m if len(d) == 0 else d + \".\" + m\n\nexclude_files = set([\"setup.py\"])\npython_files = set(chain(* (list(glob(join(d, \"*.py\")) for d in source_directory_list) + [glob(\"*.py\")]))) \npython_files -= exclude_files\n\npython_modules = [get_python_modules(f) for f in python_files]\n\nprint(\"Relevant Python Files Found: \\n%s\\n+++++++++++++++++++++\" % \",\\\n \".join(sorted(python_files)))\n\nif __name__ == '__main__':\n # The rest is also shared with the setup.py file, in addition to\n # this one, so \n\n def strip_empty(l):\n return [e.strip() for e in l if len(e.strip()) != 0]\n\n def get_include_dirs(m):\n return strip_empty(extra_include_dirs + include_paths)\n\n def get_library_dirs(m):\n return strip_empty(extra_library_dirs + lib_paths)\n\n def get_libraries(m):\n return strip_empty(library_includes + (specific_libraries[m] if m in specific_libraries else []))\n \n def get_extra_compile_args(m):\n return strip_empty(compiler_args + (['-g', '-UNDEBUG']\n if debug_mode_c_code\n else ['-DNDEBUG']))\n \n def get_extra_link_args(m):\n return strip_empty(link_args + (['-g'] if debug_mode_c_code else []))\n\n\n ############################################################\n # Cython extension lists\n\n def makeExtensionList(d, filelist):\n ext_modules = []\n\n for f in filelist:\n f_no_ext = f[:f.rfind('.')]\n f_mod = split(f_no_ext)[1]\n modname = \"%s.%s\" % (d, f_mod) if d != '.' else f_mod\n \n ext_modules.append(Extension(\n modname,\n [f],\n include_dirs = get_include_dirs(modname),\n library_dirs = get_library_dirs(modname),\n language = \"c++\",\n libraries = get_libraries(modname),\n extra_compile_args = get_extra_compile_args(modname),\n extra_link_args = get_extra_link_args(modname),\n ))\n\n return ext_modules\n\n ############################################################\n # Now get all these ready to go\n\n ext_modules = []\n\n if cython_mode:\n from Cython.Distutils import build_ext\n\n ext_modules += list(chain(*list(makeExtensionList(d, l) \n for d, l in cython_files.items())))\n \n cmdclass = {'build_ext' : build_ext}\n else:\n cmdclass = {}\n\n ext_modules += list(chain(*list(makeExtensionList(d, l)\n for d, l in c_files.items())))\n setup(\n version = version,\n description = description,\n author = author, \n author_email = author_email,\n name = name,\n cmdclass = cmdclass,\n ext_modules = ext_modules,\n py_modules = python_modules,\n scripts = scripts,\n classifiers = classifiers,\n url = url,\n download_url = download_url)\n" ]
[ [ "numpy.get_include" ] ]
LSSTDESC/ImageProcessingPipelines
[ "55eac5471fbc90fae884d8723e201da4d1bdffea" ]
[ "workflows/srs/pipe_scripts/run_makeFpSummary.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n.. _run_makeFpSummary:\n\nRun makeFpSummary.py for a list of visits\n=========================================\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport glob\nimport numpy as N\nimport libRun as LR\n\n\n__author__ = 'Nicolas Chotard <[email protected]>'\n__version__ = '$Revision: 1.0 $'\n\n\ndef build_cmd(visits, filt, num, input='pardir/output', output='pardir/output'):\n\n if not os.path.isdir(\"scripts/\" + filt):\n os.makedirs(\"scripts/\" + filt)\n\n filename = \"scripts/\" + filt + \"/visits_\" + str(num) + \".list\"\n visits = [\"--id visit=%s\" % str(v) for v in visits]\n N.savetxt(filename, visits, fmt=\"%s\")\n\n cmd = \"\"\n # Create the command line\n if opts.time:\n cmd += \"time \"\n cmd += \"makeFpSummary.py %s --output %s --dstype calexp @\" % (output, output) + \\\n filename\n if opts.showconfig:\n cmd += \" --show=config\"\n if opts.clobberversions:\n cmd += \" --clobber-versions\"\n print(\"\\nCMD: \", cmd)\n\n return cmd\n\n \nif __name__ == \"__main__\":\n\n usage = \"\"\"%prog [option]\"\"\"\n\n description = \"\"\"This script will run makeFpSummary for a given list of filters and visits. The \n default if to use f.list files (where 'f' is a filter in ugriz), and launch makeFpSummary in \n several batch jobs. To run all filters, you can do something like \n \n %prog -f ugriz -m 1 -c processConfig.py -a\n\n \"\"\"\n\n opts, args = LR.standard_options(usage=usage, description=description)\n\n # Loop over filters\n for filt in opts.filters:\n\n config = LR.select_config(opts.configs, filt)\n\n # Are there visits to load\n if not os.path.exists(filt+\".list\"):\n print(\"WARNING: No file (no visit) for filter\", filt)\n continue\n\n # Get the list of visits\n dataids = N.loadtxt(filt+\".list\", dtype='str')\n visits = list(set([dataid[1].split('=')[1] for dataid in dataids]))\n print(\"INFO: %i visits loaded: \" % len(visits), visits)\n\n # How many jobs should we be running (and how many visit in each?)?\n njobs = LR.job_number(visits, opts.mod, opts.max)\n\n # Reorganize the visit list in sequence\n visits = LR.organize_items(visits, njobs)\n\n # Loop over the visit file\n numscript = 1\n for i, visit in enumerate(visits):\n cmd = build_cmd(visit, filt, i, input=opts.output, output=opts.output)\n\n # Only submit the job if asked\n prefix = \"visit_makeFpSummary_%03d_script\" % numscript\n LR.submit(cmd, prefix, filt, autosubmit=opts.autosubmit,\n ct=opts.ct, vmem=opts.vmem, queue=opts.queue,\n system=opts.system, otheroptions=opts.otheroptions,\n from_slac=opts.fromslac, from_nersc=opts.fromnersc)\n numscript += 1 \n\n if not opts.autosubmit:\n print(\"\\nINFO: Use option --autosubmit to submit the jobs\")\n" ]
[ [ "numpy.savetxt", "numpy.loadtxt" ] ]
elma16/floe
[ "840c78758035af541374607b2686df9ac5d11ee0" ]
[ "examples/evp-error-conv.py" ]
[ "from seaice import *\nfrom firedrake import *\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\nplt.rcParams.update({\"font.size\": 12})\n\npath = \"./output/evp-error-conv\"\nPath(path).mkdir(parents=True, exist_ok=True)\n\n\"\"\"\nTEST 2 : EVP Error Convergence\n\nManufactured solutions.\nCoriolis force neglected, no forcing due to wind. \nForcing due to ocean is present.\nAdvection turned off.\nBoundary conditions : u = 0\nInitial conditions : h = 1, A = x / L\nDomain is a 500km x 500km square.\n\"\"\"\n\ntimestep = 0.1\ndumpfreq = 10 ** 3\ntimescale = 1\n\nzero = Constant(0)\n\nnorm_type = \"L2\"\n\ntitle = \"EVP Plot\"\ndiagnostic_dirname = path + \"/evp.nc\"\nplot_dirname = path + \"/evp_error_timescale={}_timestep={}_{}u.pdf\".format(\n timescale, timestep, norm_type\n)\ndirname = path + \"/u_timescale={}_timestep={}.pvd\".format(timescale, timestep)\n\nlength = 5 * 10 ** 5\npi_x = pi / length\n\nnumber_of_triangles = [5, 10, 20, 40, 80, 160]\n\nerror_values = []\n\nstabilised = {\"state\": False, \"alpha\": 1}\ntimestepping = TimesteppingParameters(timescale=timescale, timestep=timestep)\noutput = OutputParameters(dirname=dirname, dumpfreq=dumpfreq)\nsolver = SolverParameters()\nparams = SeaIceParameters(rho_a=zero, C_a=zero, cor=zero)\n\nfor values in number_of_triangles:\n\n mesh = SquareMesh(values, values, length)\n x, y = SpatialCoordinate(mesh)\n v_exp = as_vector([-sin(pi_x * x) * sin(pi_x * y), -sin(pi_x * x) * sin(pi_x * y)])\n sigma_exp = as_matrix(\n [\n [-sin(pi_x * x) * sin(pi_x * y), -sin(pi_x * x) * sin(pi_x * y)],\n [-sin(pi_x * x) * sin(pi_x * y), -sin(pi_x * x) * sin(pi_x * y)],\n ]\n )\n\n ocean_curr = as_vector(\n [0.1 * (2 * y - length) / length, -0.1 * (length - 2 * x) / length]\n )\n\n ic = {\"u\": v_exp, \"a\": 1, \"h\": 1, \"s\": sigma_exp}\n\n conditions = Conditions(\n ic=ic, ocean_curr=ocean_curr, stabilised=stabilised, family=\"CR\"\n )\n\n evp = ElasticViscousPlastic(\n mesh=mesh,\n conditions=conditions,\n timestepping=timestepping,\n output=output,\n params=params,\n solver_params=solver,\n )\n\n u1, s1 = split(evp.w1)\n u0, s0 = split(evp.w0)\n\n theta = 0.5\n uh = (1 - theta) * u0 + theta * u1\n sh = (1 - theta) * s0 + theta * s1\n\n eqn = inner(params.rho * evp.h * (u1 - u0), evp.p) * dx\n eqn += timestep * inner(sh, grad(evp.p)) * dx\n eqn -= (\n timestep\n * inner(\n params.rho_w\n * params.C_w\n * sqrt(dot(ocean_curr - uh, ocean_curr - uh))\n * (ocean_curr - uh),\n evp.p,\n )\n * dx\n )\n\n # source terms in momentum equation\n eqn += timestep * inner(div(sigma_exp), evp.p) * dx\n eqn += (\n timestep\n * inner(\n params.rho_w\n * params.C_w\n * sqrt(dot(ocean_curr - v_exp, ocean_curr - v_exp))\n * (ocean_curr - v_exp),\n evp.p,\n )\n * dx\n )\n\n zeta_exp = evp.zeta(evp.h, evp.a, evp.delta(v_exp))\n ep_dot_exp = evp.strain(grad(v_exp))\n rheology_exp = params.e ** 2 * sigma_exp + Identity(2) * 0.5 * (\n (1 - params.e ** 2) * tr(sigma_exp) + evp.Ice_Strength(evp.h, evp.a)\n )\n zeta = evp.zeta(evp.h, evp.a, evp.delta(uh))\n\n eqn += inner(s1 - s0 + 0.5 * timestep * evp.rheology / params.T, evp.q) * dx\n eqn -= inner(evp.q * zeta * timestep / params.T, evp.ep_dot) * dx\n\n # source terms in rheology\n eqn -= inner(0.5 * timestep * rheology_exp / params.T, evp.q) * dx\n eqn += inner(evp.q * zeta_exp * timestep / params.T, ep_dot_exp) * dx\n\n evp.assemble(eqn, evp.w1, evp.bcs, solver.srt_params)\n\n diag = OutputDiagnostics(description=\"test 1\", dirname=diagnostic_dirname)\n\n t = 0\n\n w = Function(evp.V, name=\"Exact Solution 
Vector\").interpolate(v_exp)\n x = Function(evp.S, name=\"Exact Solution Tensor\").interpolate(sigma_exp)\n\n u1, s1 = evp.w1.split()\n\n evp.dump(u1, s1, w, x, t=0)\n\n while t < timescale - 0.5 * timestep:\n u0, s0 = evp.w0.split()\n evp.solve(evp.usolver)\n evp.update(evp.w0, evp.w1)\n diag.dump(evp.w1, t=t)\n t += timestep\n evp.dump(u1, s1, w, x, t=t)\n evp.progress(t)\n #print(Error.compute(u1, v_exp, norm_type))\n #print(Error.compute(s1, sigma_exp, norm_type)/norm(sigma_exp, norm_type))\n\n error_values.append(Error.compute(u1, v_exp, norm_type))\n #norm(sigma_exp, norm_type))\n\nh = [sqrt(2) * length / x for x in number_of_triangles]\nhsq = [10**-6 * x**2 for x in h]\nhd = [10**-3 * x for x in h]\nerror_slope = float(format(np.polyfit(np.log(h), np.log(error_values), 1)[0], \".3f\"))\n\nprint(error_slope)\n\n#plt.title(\"EVP Error Convergence\")\nplt.xlabel(r\"h\")\nplt.ylabel(r\"$L^2$ Error\".format(norm_type))\nplt.loglog(h, error_values, \"-o\", label=\"$L^2$ error\")\nplt.loglog(h, hsq, label=\"$h^2$\")\n#plt.loglog(h, hd, label=\"$h$\")\nplt.legend(loc='best')\nplt.savefig(plot_dirname)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.loglog", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcParams.update" ] ]
kshithijiyer/TensorNetwork
[ "bf47f8635eca33edf95c73d50d48d861f628aaec" ]
[ "tensornetwork/tensornetwork_test.py" ]
[ "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n# Prepare for TF 2.0 migration\ntf.enable_v2_behavior()\n# pylint: disable=g-import-not-at-top\nfrom tensornetwork import tensornetwork\n\n\nclass NetworkTest(tf.test.TestCase):\n\n def test_sanity_check(self):\n net = tensornetwork.TensorNetwork()\n net.add_node(np.eye(2), \"a\")\n net.check_correct()\n\n def test_node_names(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), \"a\", axis_names=[\"e0\", \"e1\"])\n self.assertEqual(a.name, \"a\")\n self.assertEqual(a[0].name, \"e0\")\n self.assertEqual(a[1].name, \"e1\")\n\n def test_single_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1.0] * 5), \"a\")\n b = net.add_node(np.array([1.0] * 5), \"b\")\n e = net.connect(a[0], b[0])\n c = net.contract(e)\n net.check_correct()\n val = c.get_tensor().numpy()\n self.assertAlmostEqual(val, 5.0)\n\n def test_disconnect_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1.0] * 5), \"a\")\n b = net.add_node(np.array([1.0] * 5), \"b\")\n e = net.connect(a[0], b[0])\n self.assertFalse(e.is_dangling())\n dangling_edge_1, dangling_edge_2 = net.disconnect(e)\n net.check_correct(check_connected=False)\n self.assertTrue(dangling_edge_1.is_dangling())\n self.assertTrue(dangling_edge_2.is_dangling())\n self.assertEqual(a.get_edge(0), dangling_edge_1)\n self.assertEqual(b.get_edge(0), dangling_edge_2)\n\n def test_set_tensor(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n self.assertAllClose(a.get_tensor(), np.ones(2))\n a.set_tensor(np.zeros(2))\n self.assertAllClose(a.get_tensor(), np.zeros(2))\n\n def test_has_nondangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n self.assertFalse(a.has_nondangling_edge())\n b = net.add_node(np.ones((2, 2)))\n net.connect(b[0], b[1])\n self.assertTrue(b.has_nondangling_edge())\n\n def test_large_nodes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros([5, 6, 7, 8, 9]), \"a\")\n b = net.add_node(np.zeros([5, 6, 7, 8, 9]), \"b\")\n for i in range(5):\n net.connect(a[i], b[i])\n net.check_correct()\n\n def test_small_matmul(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros([10, 10]), name=\"a\")\n b = net.add_node(np.zeros([10, 10]), name=\"b\")\n edge = net.connect(a[0], b[0], \"edge\")\n net.check_correct()\n c = net.contract(edge, name=\"a * b\")\n self.assertEqual(c.get_tensor().shape, [10, 10])\n net.check_correct()\n\n def test_direct_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10]), name=\"a\")\n edge = net.connect(a[0], a[1], \"edge\")\n net.check_correct()\n result = net._contract_trace(edge)\n net.check_correct()\n self.assertAlmostEqual(result.get_tensor().numpy(), 10.0)\n\n def 
test_double_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10, 10, 10]), name=\"a\")\n edge1 = net.connect(a[0], a[1], \"edge1\")\n edge2 = net.connect(a[2], a[3], \"edge2\")\n net.check_correct()\n net._contract_trace(edge1)\n net.check_correct()\n val = net._contract_trace(edge2)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 100.0)\n\n def test_indirect_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones([10, 10]), name=\"a\")\n edge = net.connect(a[0], a[1], \"edge\")\n net.check_correct()\n val = net.contract(edge)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 10.0)\n\n def test_real_physics(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(a_vals, name=\"T\")\n b = net.add_node(b_vals, name=\"A\")\n c = net.add_node(c_vals, name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n net.check_correct()\n node_result = net.contract(e1)\n self.assertAllClose(node_result.get_tensor(), contract1)\n net.check_correct()\n node_result = net.contract(e2)\n self.assertAllClose(node_result.get_tensor(), contract2)\n net.check_correct()\n val = net.contract(e3)\n net.check_correct()\n self.assertAllClose(val.get_tensor(), final_result)\n\n def test_real_physics_with_tensors(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.ones([2, 3, 4, 5]), name=\"T\")\n b = net.add_node(tf.ones([4, 6, 7]), name=\"A\")\n c = net.add_node(tf.ones([5, 6, 8]), name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n net.check_correct()\n node_result = net.contract(e1)\n self.assertAllClose(node_result.get_tensor(), contract1)\n net.check_correct()\n node_result = net.contract(e2)\n self.assertAllClose(node_result.get_tensor(), contract2)\n net.check_correct()\n val = net.contract(e3)\n net.check_correct()\n self.assertAllClose(val.get_tensor(), final_result)\n\n def test_real_physics_naive_contraction(self):\n # Calcuate the expected value in numpy\n a_vals = np.ones([2, 3, 4, 5])\n b_vals = np.ones([4, 6, 7])\n c_vals = np.ones([5, 6, 8])\n contract1 = np.tensordot(a_vals, b_vals, [[2], [0]])\n contract2 = np.tensordot(c_vals, contract1, [[0], [2]])\n final_result = np.trace(contract2, axis1=0, axis2=4)\n # Build the network\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.ones([2, 3, 4, 5]), name=\"T\")\n b = net.add_node(tf.ones([4, 6, 7]), name=\"A\")\n c = net.add_node(tf.ones([5, 6, 8]), name=\"B\")\n e1 = net.connect(a[2], b[0], \"edge\")\n e2 = net.connect(c[0], a[3], \"edge2\")\n e3 = net.connect(b[1], c[1], \"edge3\")\n for edge in [e1, e2, e3]:\n net.contract(edge)\n val = net.get_final_node()\n self.assertEqual(val.get_tensor().shape, [8, 2, 3, 7])\n 
self.assertAllClose(val.get_tensor(), final_result)\n\n def test_with_tensors(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.eye(2) * 2, name=\"T\")\n b = net.add_node(tf.eye(2) * 3, name=\"A\")\n e1 = net.connect(a[0], b[0], \"edge\")\n e2 = net.connect(a[1], b[1], \"edge2\")\n net.check_correct()\n net.contract(e1)\n net.check_correct()\n val = net.contract(e2)\n net.check_correct()\n self.assertAlmostEqual(val.get_tensor().numpy(), 12.0)\n\n def test_contract_dangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1]))\n e = a[0]\n with self.assertRaises(ValueError):\n net.contract(e)\n\n def test_double_edge_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2))\n e = net.connect(a[0], a[1], name=\"edge\")\n net.contract(e)\n with self.assertRaises(ValueError):\n net.contract(e)\n\n def test_contract_trace_dangling_edge(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([1]))\n e = a[0]\n with self.assertRaises(ValueError):\n net._contract_trace(e)\n\n def test_node2_contract_trace(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros([3, 3, 1]))\n b = net.add_node(np.zeros([1]))\n net.connect(b[0], a[2])\n trace_edge = net.connect(a[0], a[1])\n net._contract_trace(trace_edge)\n net.check_correct()\n\n def test_contract_fall_through_name(self):\n net = tensornetwork.TensorNetwork()\n node = net.add_node(np.eye(2), name=\"Identity Matrix\")\n self.assertEqual(node.name, \"Identity Matrix\")\n edge = net.connect(node[0], node[1], name=\"Trace Edge\")\n self.assertEqual(edge.name, \"Trace Edge\")\n final_result = net.contract(edge, name=\"Trace Of Identity\")\n self.assertEqual(final_result.name, \"Trace Of Identity\")\n\n def test_non_connected(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2, 2]))\n b = net.add_node(np.array([2, 2]))\n net.connect(a[0], b[0])\n c = net.add_node(np.array([2, 2]))\n d = net.add_node(np.array([2, 2]))\n net.connect(c[0], d[0])\n with self.assertRaises(ValueError):\n net.check_connected()\n\n def test_node_get_dim_bad_axis(self):\n node = tensornetwork.Node(np.eye(2), \"a\", axis_names=[\"1\", \"2\"])\n with self.assertRaises(ValueError):\n node.get_dimension(10)\n\n def test_bad_trace_contract(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2]))\n b = net.add_node(np.array([2]))\n e = net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net._contract_trace(e)\n\n def test_double_edge_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.array([2]), name=\"a\")\n b = net.add_node(np.array([2]), name=\"b\")\n net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net.connect(a[0], b[0])\n\n def test_named_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), axis_names=[\"alpha\", \"beta\"])\n e = net.connect(a[\"alpha\"], a[\"beta\"])\n b = net.contract(e)\n self.assertAlmostEqual(b.get_tensor().numpy(), 2.0)\n\n def test_mixed_named_axis(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2) * 2.0, axis_names=[\"alpha\", \"beta\"])\n b = net.add_node(np.eye(2) * 3.0)\n e1 = net.connect(a[\"alpha\"], b[0])\n # Axes should still be indexable by numbers even with naming.\n e2 = net.connect(a[1], b[1])\n net.contract(e1)\n result = net.contract(e2)\n self.assertAlmostEqual(result.get_tensor().numpy(), 12.0)\n\n def test_duplicate_name(self):\n net = tensornetwork.TensorNetwork()\n with 
self.assertRaises(ValueError):\n net.add_node(np.eye(2), axis_names=[\"test\", \"test\"])\n\n def test_bad_axis_name_length(self):\n net = tensornetwork.TensorNetwork()\n with self.assertRaises(ValueError):\n # This should have 2 names, not 1.\n net.add_node(np.eye(2), axis_names=[\"need_2_names\"])\n\n def test_bad_axis_name_connect(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2), axis_names=[\"test\", \"names\"])\n with self.assertRaises(ValueError):\n a.get_edge(\"bad_name\")\n\n def test_node_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n e2 = a[0]\n e3 = a[1]\n e4 = a[2]\n self.assertEqual(a.get_tensor().shape, (2, 3, 4))\n a.reorder_edges([e4, e2, e3])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (4, 2, 3))\n self.assertEqual(e2.axis1, 1)\n self.assertEqual(e3.axis1, 2)\n self.assertEqual(e4.axis1, 0)\n\n def test_trace_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 2, 3)))\n e2 = net.connect(a[1], a[0])\n e3 = a[2]\n with self.assertRaises(ValueError):\n a.reorder_edges([e2, e3])\n\n def test_mismatch_edge_ordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3)))\n e2_a = a[0]\n b = net.add_node(np.zeros((2,)))\n e_b = b[0]\n with self.assertRaises(ValueError):\n a.reorder_edges([e2_a, e_b])\n\n def test_complicated_edge_reordering(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n b = net.add_node(np.zeros((2, 5)))\n c = net.add_node(np.zeros((3,)))\n d = net.add_node(np.zeros((4, 5)))\n e_ab = net.connect(a[0], b[0])\n e_bd = net.connect(b[1], d[1])\n e_ac = net.connect(a[1], c[0])\n e_ad = net.connect(a[2], d[0])\n net.contract(e_bd)\n a.reorder_edges([e_ac, e_ab, e_ad])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (3, 2, 4))\n\n def test_edge_reorder_axis_names(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 5)), axis_names=[\"a\", \"b\", \"c\", \"d\"])\n edge_a = a[\"a\"]\n edge_b = a[\"b\"]\n edge_c = a[\"c\"]\n edge_d = a[\"d\"]\n a.reorder_edges([edge_c, edge_b, edge_d, edge_a])\n self.assertEqual(a.get_tensor().shape, (4, 3, 5, 2))\n self.assertEqual(a.axis_names, [\"c\", \"b\", \"d\", \"a\"])\n\n def test_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((2, 4, 5)), name=\"A\")\n b = net.add_node(np.ones((4, 3, 6)), name=\"B\")\n c = net.add_node(np.ones((3, 2)), name=\"C\")\n net.connect(a[1], b[0])\n net.connect(a[0], c[1])\n net.connect(b[1], c[0])\n # Purposely leave b's 3rd axis undefined.\n d = net.outer_product(a, b, name=\"D\")\n net.check_correct()\n self.assertEqual(d.get_tensor().shape, (2, 4, 5, 4, 3, 6))\n self.assertAllClose(d.get_tensor().numpy(), np.ones((2, 4, 5, 4, 3, 6)))\n self.assertEqual(d.name, \"D\")\n\n def test_outer_product_final_nodes(self):\n net = tensornetwork.TensorNetwork()\n edges = []\n for i in range(1, 5):\n edges.append(net.add_node(tf.ones(i))[0])\n final_node = net.outer_product_final_nodes(edges)\n self.assertAllClose(final_node.get_tensor(), np.ones([1, 2, 3, 4]))\n self.assertEqual(final_node.get_all_edges(), edges)\n\n def test_outer_product_final_nodes_not_contracted(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones(2))\n b = net.add_node(np.ones(2))\n e = net.connect(a[0], b[0])\n with self.assertRaises(ValueError):\n net.outer_product_final_nodes([e])\n\n def test_add_axis_names(self):\n a = 
tensornetwork.Node(np.eye(2), \"A\", [\"ignore1\", \"ignore2\"])\n a.add_axis_names([\"a\", \"b\"])\n self.assertEqual(a.axis_names, [\"a\", \"b\"])\n\n def test_reorder_axes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4)))\n b = net.add_node(np.zeros((3, 4, 5)))\n c = net.add_node(np.zeros((2, 4, 5)))\n net.connect(a[0], c[0])\n net.connect(b[0], a[1])\n net.connect(a[2], c[1])\n net.connect(b[2], c[2])\n a.reorder_axes([2, 0, 1])\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (4, 2, 3))\n\n def test_flattening_standard_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 5)), name=\"A\")\n b = net.add_node(np.zeros((2, 3, 4, 5)), name=\"B\")\n e1 = net.connect(a[0], b[0], \"Edge_1_1\")\n e2 = net.connect(a[2], b[3], \"Edge_2_3\")\n edge_a_1 = a[1]\n edge_b_1 = b[1]\n edge_b_2 = b[2]\n new_edge = net.flatten_edges([e1, e2], new_edge_name=\"New Edge\")\n self.assertEqual(a.get_tensor().shape, (3, 10))\n self.assertEqual(b.get_tensor().shape, (3, 4, 10))\n self.assertEqual(a.edges, [edge_a_1, new_edge])\n self.assertEqual(b.edges, [edge_b_1, edge_b_2, new_edge])\n net.check_correct()\n\n def test_flattening_dangling_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 5)), name=\"A\")\n e1 = a[0]\n e2 = a[1]\n e3 = a[2]\n e4 = a[3]\n flattened_edge = net.flatten_edges([e1, e3], new_edge_name=\"New Edge\")\n self.assertEqual(a.get_tensor().shape, (3, 5, 8))\n self.assertEqual(a.edges, [e2, e4, flattened_edge])\n self.assertEqual(flattened_edge.name, \"New Edge\")\n net.check_correct()\n\n def test_flatten_edges_different_nodes(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.eye(2))\n b = net.add_node(np.eye(2))\n c = net.add_node(np.eye(2))\n e1 = net.connect(a[0], b[0])\n e2 = net.connect(a[1], c[0])\n net.connect(b[1], c[1])\n with self.assertRaises(ValueError):\n net.flatten_edges([e1, e2])\n\n def test_flatten_trace_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.zeros((2, 3, 4, 3, 5, 5)))\n c = net.add_node(np.zeros((2, 4)))\n e1 = net.connect(a[1], a[3])\n e2 = net.connect(a[4], a[5])\n external_1 = net.connect(a[0], c[0])\n external_2 = net.connect(c[1], a[2])\n new_edge = net.flatten_edges([e1, e2], \"New Edge\")\n net.check_correct()\n self.assertEqual(a.get_tensor().shape, (2, 4, 15, 15))\n self.assertEqual(a.edges, [external_1, external_2, new_edge, new_edge])\n self.assertEqual(new_edge.name, \"New Edge\")\n\n def test_flatten_consistent_result(self):\n net_noflat = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(3, 5, 5, 6))\n b_val = np.random.normal(size=(5, 6, 4, 5))\n # Create non flattened example to compare against.\n a_noflat = net_noflat.add_node(a_val)\n b_noflat = net_noflat.add_node(b_val)\n e1 = net_noflat.connect(a_noflat[1], b_noflat[3])\n e2 = net_noflat.connect(a_noflat[3], b_noflat[1])\n e3 = net_noflat.connect(a_noflat[2], b_noflat[0])\n a_dangling_noflat = a_noflat[0]\n b_dangling_noflat = b_noflat[2]\n for edge in [e1, e2, e3]:\n net_noflat.contract(edge)\n noflat_result_node = net_noflat.get_final_node()\n noflat_result_node.reorder_edges([a_dangling_noflat, b_dangling_noflat])\n noflat_result = noflat_result_node.get_tensor().numpy()\n # Create network with flattening\n net_flat = tensornetwork.TensorNetwork()\n a_flat = net_flat.add_node(a_val)\n b_flat = net_flat.add_node(b_val)\n e1 = net_flat.connect(a_flat[1], b_flat[3])\n e2 = net_flat.connect(a_flat[3], b_flat[1])\n e3 = 
net_flat.connect(a_flat[2], b_flat[0])\n a_dangling_flat = a_flat[0]\n b_dangling_flat = b_flat[2]\n final_edge = net_flat.flatten_edges([e1, e2, e3])\n flat_result_node = net_flat.contract(final_edge)\n flat_result_node.reorder_edges([a_dangling_flat, b_dangling_flat])\n flat_result = flat_result_node.get_tensor().numpy()\n self.assertAllClose(flat_result, noflat_result)\n\n def test_flatten_consistent_tensor(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4, 5))\n b_val = np.random.normal(size=(3, 5, 4, 2))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n e1 = net.connect(a[0], b[3])\n e2 = net.connect(b[1], a[3])\n e3 = net.connect(a[1], b[0])\n net.flatten_edges([e3, e1, e2])\n net.check_correct()\n\n # Check expected values.\n a_final = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))\n b_final = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))\n self.assertAllClose(a.get_tensor().numpy(), a_final)\n self.assertAllClose(b.get_tensor().numpy(), b_final)\n\n def test_flatten_trace_consistent_result(self):\n net_noflat = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(5, 6, 6, 7, 5, 7))\n a_noflat = net_noflat.add_node(a_val)\n e1 = net_noflat.connect(a_noflat[0], a_noflat[4])\n e2 = net_noflat.connect(a_noflat[1], a_noflat[2])\n e3 = net_noflat.connect(a_noflat[3], a_noflat[5])\n for edge in [e1, e2, e3]:\n net_noflat.contract(edge)\n noflat_result = net_noflat.get_final_node().get_tensor().numpy()\n # Create network with flattening\n net_flat = tensornetwork.TensorNetwork()\n a_flat = net_flat.add_node(a_val)\n e1 = net_flat.connect(a_flat[0], a_flat[4])\n e2 = net_flat.connect(a_flat[1], a_flat[2])\n e3 = net_flat.connect(a_flat[3], a_flat[5])\n final_edge = net_flat.flatten_edges([e1, e2, e3])\n flat_result = net_flat.contract(final_edge).get_tensor().numpy()\n self.assertAllClose(flat_result, noflat_result)\n\n def test_flatten_trace_consistent_tensor(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(5, 3, 4, 4, 5))\n a = net.add_node(a_val)\n e1 = net.connect(a[0], a[4])\n e2 = net.connect(a[3], a[2])\n net.flatten_edges([e2, e1])\n net.check_correct()\n # Check expected values.\n a_final = np.reshape(np.transpose(a_val, (1, 2, 0, 3, 4)), (3, 20, 20))\n self.assertAllClose(a.get_tensor().numpy(), a_final)\n\n def test_add_subnetwork(self):\n net1 = tensornetwork.TensorNetwork()\n net2 = tensornetwork.TensorNetwork()\n a = net1.add_node(np.eye(2) * 2)\n b = net1.add_node(np.eye(2) * 3)\n e1 = net1.connect(a[0], b[0])\n c = net2.add_node(np.eye(2) * 4)\n net2.add_subnetwork(net1)\n self.assertIn(a, net2.nodes_set)\n self.assertIn(b, net2.nodes_set)\n e2 = net2.connect(c[0], a[1])\n e3 = net2.connect(c[1], b[1])\n net2.check_correct()\n for edge in [e1, e2, e3]:\n net2.contract(edge)\n result = net2.get_final_node()\n self.assertAllClose(result.get_tensor().numpy(), 48.0)\n\n def test_merge_networks(self):\n net1 = tensornetwork.TensorNetwork()\n net2 = tensornetwork.TensorNetwork()\n a = net1.add_node(np.eye(2) * 2)\n b = net1.add_node(np.eye(2) * 3)\n e1 = net1.connect(a[0], b[0])\n c = net2.add_node(np.eye(2) * 4)\n net3 = tensornetwork.TensorNetwork.merge_networks([net1, net2])\n self.assertIn(a, net3.nodes_set)\n self.assertIn(b, net3.nodes_set)\n e2 = net3.connect(c[0], a[1])\n e3 = net3.connect(c[1], b[1])\n net3.check_correct()\n for edge in [e1, e2, e3]:\n net3.contract(edge)\n result = net3.get_final_node()\n self.assertAllClose(result.get_tensor().numpy(), 48.0)\n\n def 
test_flatten_edges_between(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3, 4, 5)))\n b = net.add_node(np.ones((5, 4, 3)))\n net.connect(a[0], b[2])\n net.connect(a[1], b[1])\n net.connect(a[2], b[0])\n net.flatten_edges_between(a, b)\n net.check_correct()\n self.assertAllClose(a.get_tensor().numpy(), np.ones((60,)))\n self.assertAllClose(b.get_tensor().numpy(), np.ones((60,)))\n\n def test_flatten_edges_between_no_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3)))\n b = net.add_node(np.ones((3)))\n self.assertEqual(net.flatten_edges_between(a, b), None)\n\n def test_flatten_all_edges(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3, 3, 5, 6, 2, 2)))\n b = net.add_node(np.ones((5, 6, 7)))\n c = net.add_node(np.ones((7,)))\n trace_edge1 = net.connect(a[0], a[1])\n trace_edge2 = net.connect(a[4], a[5])\n split_edge1 = net.connect(a[2], b[0])\n split_edge2 = net.connect(a[3], b[1])\n ok_edge = net.connect(b[2], c[0])\n flat_edges = net.flatten_all_edges()\n net.check_correct()\n self.assertEqual(len(flat_edges), 3)\n self.assertNotIn(trace_edge1, flat_edges)\n self.assertNotIn(trace_edge2, flat_edges)\n self.assertNotIn(split_edge1, flat_edges)\n self.assertNotIn(split_edge2, flat_edges)\n self.assertIn(ok_edge, flat_edges)\n\n def test_contract_between(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4, 5))\n b_val = np.random.normal(size=(3, 5, 4, 2))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n net.connect(a[0], b[3])\n net.connect(b[1], a[3])\n net.connect(a[1], b[0])\n edge_a = a[2]\n edge_b = b[2]\n c = net.contract_between(a, b, name=\"New Node\")\n c.reorder_edges([edge_a, edge_b])\n net.check_correct()\n # Check expected values.\n a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))\n b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30))\n final_val = np.matmul(a_flat, b_flat.T)\n self.assertAllClose(c.get_tensor().numpy(), final_val)\n self.assertEqual(c.name, \"New Node\")\n\n def test_contract_between_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4))\n b_val = np.random.normal(size=(5, 6, 7))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n c = net.contract_between(a, b, allow_outer_product=True)\n self.assertEqual(c.get_tensor().shape, (2, 3, 4, 5, 6, 7))\n\n def test_contract_between_no_outer_product(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(2, 3, 4))\n b_val = np.random.normal(size=(5, 6, 7))\n a = net.add_node(a_val)\n b = net.add_node(b_val)\n with self.assertRaises(ValueError):\n net.contract_between(a, b)\n\n def test_contract_between_trace_edges(self):\n net = tensornetwork.TensorNetwork()\n a_val = np.random.normal(size=(3, 3))\n final_val = np.trace(a_val)\n a = net.add_node(a_val)\n net.connect(a[0], a[1])\n b = net.contract_between(a, a)\n net.check_correct()\n self.assertAllClose(b.get_tensor().numpy(), final_val)\n\n def test_join_dangling(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(np.ones((3,)))\n b = net.add_node(np.ones((3,)))\n net.connect(a[0], b[0])\n net.check_correct()\n\n def test_dynamic_network_sizes(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[:n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n n2 = net.add_node(x_slice)\n e = net.connect(n1[0], n2[0])\n return net.contract(e).get_tensor()\n\n x = tf.ones(10)\n self.assertAllClose(f(x, 
tf.convert_to_tensor(2)), 2.0)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)\n\n def test_dynamic_network_sizes_flatten_standard(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[..., :n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n n2 = net.add_node(x_slice)\n net.connect(n1[0], n2[0])\n net.connect(n1[1], n2[1])\n net.connect(n1[2], n2[2])\n return net.contract(net.flatten_edges_between(n1, n2)).get_tensor()\n\n x = tf.ones((3, 4, 5))\n self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)\n\n def test_dynamic_network_sizes_flatten_trace(self):\n\n @tf.contrib.eager.defun\n def f(x, n):\n x_slice = x[..., :n]\n net = tensornetwork.TensorNetwork()\n n1 = net.add_node(x_slice)\n net.connect(n1[0], n1[2])\n net.connect(n1[1], n1[3])\n return net.contract(net.flatten_edges_between(n1, n1)).get_tensor()\n\n x = tf.ones((3, 4, 3, 4, 5))\n self.assertAllClose(f(x, tf.convert_to_tensor(2)), tf.ones((2,)) * 12)\n self.assertAllClose(f(x, tf.convert_to_tensor(3)), tf.ones((3,)) * 12)\n\n def test_split_node(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.zeros((2, 3, 4, 5, 6)))\n left_edges = []\n for i in range(3):\n left_edges.append(a[i])\n right_edges = []\n for i in range(3, 5):\n right_edges.append(a[i])\n left, right, _ = net.split_node(a, left_edges, right_edges)\n net.check_correct()\n self.assertAllClose(left.get_tensor(), np.zeros((2, 3, 4, 24)))\n self.assertAllClose(right.get_tensor(), np.zeros((24, 5, 6)))\n\n def test_split_node_mixed_order(self):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tf.zeros((2, 3, 4, 5, 6)))\n left_edges = []\n for i in [0, 2, 4]:\n left_edges.append(a[i])\n right_edges = []\n for i in [1, 3]:\n right_edges.append(a[i])\n left, right, _ = net.split_node(a, left_edges, right_edges)\n net.check_correct()\n self.assertAllClose(left.get_tensor(), np.zeros((2, 4, 6, 15)))\n self.assertAllClose(right.get_tensor(), np.zeros((15, 3, 5)))\n\n def test_split_node_full_svd(self):\n net = tensornetwork.TensorNetwork()\n random_matrix = np.random.rand(10, 10)\n unitary1, _, unitary2 = np.linalg.svd(random_matrix)\n singular_values = np.array(range(10))\n val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))\n a = net.add_node(val)\n e1 = a[0]\n e2 = a[1]\n _, s, _, _, = net.split_node_full_svd(a, [e1], [e2])\n net.check_correct()\n self.assertAllClose(s.get_tensor(), np.diag(np.arange(9, -1, -1)))\n\n def test_batch_usage(self):\n def build_tensornetwork(tensors):\n net = tensornetwork.TensorNetwork()\n a = net.add_node(tensors[0])\n b = net.add_node(tensors[1])\n e = net.connect(a[0], b[0])\n return net.contract(e).get_tensor()\n\n tensors = [tf.ones((5, 10)), tf.ones((5, 10))]\n result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float32)\n self.assertAllClose(result, tf.ones(5) * 10)\n\nif __name__ == \"__main__\":\n tf.test.main()\n\n" ]
[ [ "tensorflow.convert_to_tensor", "numpy.diag", "tensorflow.zeros", "tensorflow.map_fn", "numpy.trace", "numpy.linalg.svd", "numpy.arange", "numpy.eye", "numpy.matmul", "tensorflow.test.main", "numpy.tensordot", "numpy.zeros", "tensorflow.enable_v2_behavior", "numpy.random.rand", "numpy.transpose", "numpy.array", "tensorflow.ones", "tensorflow.eye", "numpy.ones", "numpy.random.normal" ] ]